Dataset columns (name: type, observed value lengths):
identifier: string, lengths 1 to 155
parameters: string, lengths 2 to 6.09k
docstring: string, lengths 11 to 63.4k
docstring_summary: string, lengths 0 to 63.4k
function: string, lengths 29 to 99.8k
function_tokens: sequence
start_point: sequence
end_point: sequence
language: string, 1 distinct value
docstring_language: string, lengths 2 to 7
docstring_language_predictions: string, lengths 18 to 23
is_langid_reliable: string, 2 distinct values
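To make the column layout above concrete, here is a minimal, purely illustrative sketch of a single record, with values copied from the first row below (AbstractDemoPlayer.select_sound_mode). No particular loading library is assumed; the record is simply modeled as a plain Python dict, and the (line, column) reading of start_point/end_point in the comments is an assumption based on how such fields are typically produced.

# Illustrative only: one record of this dataset as a plain Python dict.
# Values are taken from the first row shown below; the token list is
# truncated here for brevity.
record = {
    "identifier": "AbstractDemoPlayer.select_sound_mode",
    "parameters": "(self, sound_mode)",
    "docstring": "Select sound mode.",
    "docstring_summary": "Select sound mode.",
    # The source is stored flattened; printing it restores the layout.
    "function": (
        'def select_sound_mode(self, sound_mode):\n'
        '    """Select sound mode."""\n'
        '    self._sound_mode = sound_mode\n'
        '    self.schedule_update_ha_state()'
    ),
    "function_tokens": ["def", "select_sound_mode", "(", "self", ",", "sound_mode", ")", ":"],
    "start_point": [203, 4],   # assumed (line, column) where the function starts
    "end_point": [206, 39],    # assumed (line, column) where the function ends
    "language": "python",
    "docstring_language": "en",
    "docstring_language_predictions": "['en', 'mk', 'en']",
    "is_langid_reliable": "True",
}

print(record["function"])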
AbstractDemoPlayer.select_sound_mode
(self, sound_mode)
Select sound mode.
Select sound mode.
def select_sound_mode(self, sound_mode): """Select sound mode.""" self._sound_mode = sound_mode self.schedule_update_ha_state()
[ "def", "select_sound_mode", "(", "self", ",", "sound_mode", ")", ":", "self", ".", "_sound_mode", "=", "sound_mode", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 203, 4 ]
[ 206, 39 ]
python
en
['en', 'mk', 'en']
True
DemoYoutubePlayer.__init__
(self, name, youtube_id=None, media_title=None, duration=360)
Initialize the demo device.
Initialize the demo device.
def __init__(self, name, youtube_id=None, media_title=None, duration=360): """Initialize the demo device.""" super().__init__(name) self.youtube_id = youtube_id self._media_title = media_title self._duration = duration self._progress = int(duration * 0.15) self._progress_updated_at = dt_util.utcnow()
[ "def", "__init__", "(", "self", ",", "name", ",", "youtube_id", "=", "None", ",", "media_title", "=", "None", ",", "duration", "=", "360", ")", ":", "super", "(", ")", ".", "__init__", "(", "name", ")", "self", ".", "youtube_id", "=", "youtube_id", "self", ".", "_media_title", "=", "media_title", "self", ".", "_duration", "=", "duration", "self", ".", "_progress", "=", "int", "(", "duration", "*", "0.15", ")", "self", ".", "_progress_updated_at", "=", "dt_util", ".", "utcnow", "(", ")" ]
[ 214, 4 ]
[ 221, 52 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_content_id
(self)
Return the content ID of current playing media.
Return the content ID of current playing media.
def media_content_id(self): """Return the content ID of current playing media.""" return self.youtube_id
[ "def", "media_content_id", "(", "self", ")", ":", "return", "self", ".", "youtube_id" ]
[ 224, 4 ]
[ 226, 30 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_content_type
(self)
Return the content type of current playing media.
Return the content type of current playing media.
def media_content_type(self): """Return the content type of current playing media.""" return MEDIA_TYPE_MOVIE
[ "def", "media_content_type", "(", "self", ")", ":", "return", "MEDIA_TYPE_MOVIE" ]
[ 229, 4 ]
[ 231, 31 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_duration
(self)
Return the duration of current playing media in seconds.
Return the duration of current playing media in seconds.
def media_duration(self): """Return the duration of current playing media in seconds.""" return self._duration
[ "def", "media_duration", "(", "self", ")", ":", "return", "self", ".", "_duration" ]
[ 234, 4 ]
[ 236, 29 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_image_url
(self)
Return the image url of current playing media.
Return the image url of current playing media.
def media_image_url(self): """Return the image url of current playing media.""" return f"https://img.youtube.com/vi/{self.youtube_id}/hqdefault.jpg"
[ "def", "media_image_url", "(", "self", ")", ":", "return", "f\"https://img.youtube.com/vi/{self.youtube_id}/hqdefault.jpg\"" ]
[ 239, 4 ]
[ 241, 76 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_title
(self)
Return the title of current playing media.
Return the title of current playing media.
def media_title(self): """Return the title of current playing media.""" return self._media_title
[ "def", "media_title", "(", "self", ")", ":", "return", "self", ".", "_media_title" ]
[ 244, 4 ]
[ 246, 32 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.app_name
(self)
Return the current running application.
Return the current running application.
def app_name(self): """Return the current running application.""" return "YouTube"
[ "def", "app_name", "(", "self", ")", ":", "return", "\"YouTube\"" ]
[ 249, 4 ]
[ 251, 24 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.supported_features
(self)
Flag media player features that are supported.
Flag media player features that are supported.
def supported_features(self): """Flag media player features that are supported.""" return YOUTUBE_PLAYER_SUPPORT
[ "def", "supported_features", "(", "self", ")", ":", "return", "YOUTUBE_PLAYER_SUPPORT" ]
[ 254, 4 ]
[ 256, 37 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_position
(self)
Position of current playing media in seconds.
Position of current playing media in seconds.
def media_position(self): """Position of current playing media in seconds.""" if self._progress is None: return None position = self._progress if self._player_state == STATE_PLAYING: position += (dt_util.utcnow() - self._progress_updated_at).total_seconds() return position
[ "def", "media_position", "(", "self", ")", ":", "if", "self", ".", "_progress", "is", "None", ":", "return", "None", "position", "=", "self", ".", "_progress", "if", "self", ".", "_player_state", "==", "STATE_PLAYING", ":", "position", "+=", "(", "dt_util", ".", "utcnow", "(", ")", "-", "self", ".", "_progress_updated_at", ")", ".", "total_seconds", "(", ")", "return", "position" ]
[ 259, 4 ]
[ 269, 23 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_position_updated_at
(self)
When was the position of the current playing media valid. Returns value from homeassistant.util.dt.utcnow().
When was the position of the current playing media valid.
def media_position_updated_at(self): """When was the position of the current playing media valid. Returns value from homeassistant.util.dt.utcnow(). """ if self._player_state == STATE_PLAYING: return self._progress_updated_at
[ "def", "media_position_updated_at", "(", "self", ")", ":", "if", "self", ".", "_player_state", "==", "STATE_PLAYING", ":", "return", "self", ".", "_progress_updated_at" ]
[ 272, 4 ]
[ 278, 44 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.play_media
(self, media_type, media_id, **kwargs)
Play a piece of media.
Play a piece of media.
def play_media(self, media_type, media_id, **kwargs): """Play a piece of media.""" self.youtube_id = media_id self.schedule_update_ha_state()
[ "def", "play_media", "(", "self", ",", "media_type", ",", "media_id", ",", "*", "*", "kwargs", ")", ":", "self", ".", "youtube_id", "=", "media_id", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 280, 4 ]
[ 283, 39 ]
python
en
['en', 'en', 'en']
True
DemoYoutubePlayer.media_pause
(self)
Send pause command.
Send pause command.
def media_pause(self): """Send pause command.""" self._progress = self.media_position self._progress_updated_at = dt_util.utcnow() super().media_pause()
[ "def", "media_pause", "(", "self", ")", ":", "self", ".", "_progress", "=", "self", ".", "media_position", "self", ".", "_progress_updated_at", "=", "dt_util", ".", "utcnow", "(", ")", "super", "(", ")", ".", "media_pause", "(", ")" ]
[ 285, 4 ]
[ 289, 29 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.__init__
(self)
Initialize the demo device.
Initialize the demo device.
def __init__(self): """Initialize the demo device.""" super().__init__("Walkman") self._cur_track = 0 self._repeat = REPEAT_MODE_OFF
[ "def", "__init__", "(", "self", ")", ":", "super", "(", ")", ".", "__init__", "(", "\"Walkman\"", ")", "self", ".", "_cur_track", "=", "0", "self", ".", "_repeat", "=", "REPEAT_MODE_OFF" ]
[ 320, 4 ]
[ 324, 38 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_content_id
(self)
Return the content ID of current playing media.
Return the content ID of current playing media.
def media_content_id(self): """Return the content ID of current playing media.""" return "bounzz-1"
[ "def", "media_content_id", "(", "self", ")", ":", "return", "\"bounzz-1\"" ]
[ 327, 4 ]
[ 329, 25 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_content_type
(self)
Return the content type of current playing media.
Return the content type of current playing media.
def media_content_type(self): """Return the content type of current playing media.""" return MEDIA_TYPE_MUSIC
[ "def", "media_content_type", "(", "self", ")", ":", "return", "MEDIA_TYPE_MUSIC" ]
[ 332, 4 ]
[ 334, 31 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_duration
(self)
Return the duration of current playing media in seconds.
Return the duration of current playing media in seconds.
def media_duration(self): """Return the duration of current playing media in seconds.""" return 213
[ "def", "media_duration", "(", "self", ")", ":", "return", "213" ]
[ 337, 4 ]
[ 339, 18 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_image_url
(self)
Return the image url of current playing media.
Return the image url of current playing media.
def media_image_url(self): """Return the image url of current playing media.""" return "https://graph.facebook.com/v2.5/107771475912710/picture?type=large"
[ "def", "media_image_url", "(", "self", ")", ":", "return", "\"https://graph.facebook.com/v2.5/107771475912710/picture?type=large\"" ]
[ 342, 4 ]
[ 344, 83 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_title
(self)
Return the title of current playing media.
Return the title of current playing media.
def media_title(self): """Return the title of current playing media.""" return self.tracks[self._cur_track][1] if self.tracks else ""
[ "def", "media_title", "(", "self", ")", ":", "return", "self", ".", "tracks", "[", "self", ".", "_cur_track", "]", "[", "1", "]", "if", "self", ".", "tracks", "else", "\"\"" ]
[ 347, 4 ]
[ 349, 69 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_artist
(self)
Return the artist of current playing media (Music track only).
Return the artist of current playing media (Music track only).
def media_artist(self): """Return the artist of current playing media (Music track only).""" return self.tracks[self._cur_track][0] if self.tracks else ""
[ "def", "media_artist", "(", "self", ")", ":", "return", "self", ".", "tracks", "[", "self", ".", "_cur_track", "]", "[", "0", "]", "if", "self", ".", "tracks", "else", "\"\"" ]
[ 352, 4 ]
[ 354, 69 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_album_name
(self)
Return the album of current playing media (Music track only).
Return the album of current playing media (Music track only).
def media_album_name(self): """Return the album of current playing media (Music track only).""" return "Bounzz"
[ "def", "media_album_name", "(", "self", ")", ":", "return", "\"Bounzz\"" ]
[ 357, 4 ]
[ 359, 23 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_track
(self)
Return the track number of current media (Music track only).
Return the track number of current media (Music track only).
def media_track(self): """Return the track number of current media (Music track only).""" return self._cur_track + 1
[ "def", "media_track", "(", "self", ")", ":", "return", "self", ".", "_cur_track", "+", "1" ]
[ 362, 4 ]
[ 364, 34 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.repeat
(self)
Return current repeat mode.
Return current repeat mode.
def repeat(self): """Return current repeat mode.""" return self._repeat
[ "def", "repeat", "(", "self", ")", ":", "return", "self", ".", "_repeat" ]
[ 367, 4 ]
[ 369, 27 ]
python
en
['en', 'la', 'en']
True
DemoMusicPlayer.supported_features
(self)
Flag media player features that are supported.
Flag media player features that are supported.
def supported_features(self): """Flag media player features that are supported.""" return MUSIC_PLAYER_SUPPORT
[ "def", "supported_features", "(", "self", ")", ":", "return", "MUSIC_PLAYER_SUPPORT" ]
[ 372, 4 ]
[ 374, 35 ]
python
en
['en', 'en', 'en']
True
DemoMusicPlayer.media_previous_track
(self)
Send previous track command.
Send previous track command.
def media_previous_track(self): """Send previous track command.""" if self._cur_track > 0: self._cur_track -= 1 self.schedule_update_ha_state()
[ "def", "media_previous_track", "(", "self", ")", ":", "if", "self", ".", "_cur_track", ">", "0", ":", "self", ".", "_cur_track", "-=", "1", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 376, 4 ]
[ 380, 43 ]
python
en
['en', 'it', 'en']
True
DemoMusicPlayer.media_next_track
(self)
Send next track command.
Send next track command.
def media_next_track(self): """Send next track command.""" if self._cur_track < len(self.tracks) - 1: self._cur_track += 1 self.schedule_update_ha_state()
[ "def", "media_next_track", "(", "self", ")", ":", "if", "self", ".", "_cur_track", "<", "len", "(", "self", ".", "tracks", ")", "-", "1", ":", "self", ".", "_cur_track", "+=", "1", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 382, 4 ]
[ 386, 43 ]
python
en
['en', 'pt', 'en']
True
DemoMusicPlayer.clear_playlist
(self)
Clear players playlist.
Clear players playlist.
def clear_playlist(self): """Clear players playlist.""" self.tracks = [] self._cur_track = 0 self._player_state = STATE_OFF self.schedule_update_ha_state()
[ "def", "clear_playlist", "(", "self", ")", ":", "self", ".", "tracks", "=", "[", "]", "self", ".", "_cur_track", "=", "0", "self", ".", "_player_state", "=", "STATE_OFF", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 388, 4 ]
[ 393, 39 ]
python
en
['fr', 'en', 'en']
True
DemoMusicPlayer.set_repeat
(self, repeat)
Enable/disable repeat mode.
Enable/disable repeat mode.
def set_repeat(self, repeat): """Enable/disable repeat mode.""" self._repeat = repeat self.schedule_update_ha_state()
[ "def", "set_repeat", "(", "self", ",", "repeat", ")", ":", "self", ".", "_repeat", "=", "repeat", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 395, 4 ]
[ 398, 39 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.__init__
(self)
Initialize the demo device.
Initialize the demo device.
def __init__(self): """Initialize the demo device.""" super().__init__("Lounge room") self._cur_episode = 1 self._episode_count = 13 self._source = "dvd" self._source_list = ["dvd", "youtube"]
[ "def", "__init__", "(", "self", ")", ":", "super", "(", ")", ".", "__init__", "(", "\"Lounge room\"", ")", "self", ".", "_cur_episode", "=", "1", "self", ".", "_episode_count", "=", "13", "self", ".", "_source", "=", "\"dvd\"", "self", ".", "_source_list", "=", "[", "\"dvd\"", ",", "\"youtube\"", "]" ]
[ 406, 4 ]
[ 412, 46 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_content_id
(self)
Return the content ID of current playing media.
Return the content ID of current playing media.
def media_content_id(self): """Return the content ID of current playing media.""" return "house-of-cards-1"
[ "def", "media_content_id", "(", "self", ")", ":", "return", "\"house-of-cards-1\"" ]
[ 415, 4 ]
[ 417, 33 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_content_type
(self)
Return the content type of current playing media.
Return the content type of current playing media.
def media_content_type(self): """Return the content type of current playing media.""" return MEDIA_TYPE_TVSHOW
[ "def", "media_content_type", "(", "self", ")", ":", "return", "MEDIA_TYPE_TVSHOW" ]
[ 420, 4 ]
[ 422, 32 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_duration
(self)
Return the duration of current playing media in seconds.
Return the duration of current playing media in seconds.
def media_duration(self): """Return the duration of current playing media in seconds.""" return 3600
[ "def", "media_duration", "(", "self", ")", ":", "return", "3600" ]
[ 425, 4 ]
[ 427, 19 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_image_url
(self)
Return the image url of current playing media.
Return the image url of current playing media.
def media_image_url(self): """Return the image url of current playing media.""" return "https://graph.facebook.com/v2.5/HouseofCards/picture?width=400"
[ "def", "media_image_url", "(", "self", ")", ":", "return", "\"https://graph.facebook.com/v2.5/HouseofCards/picture?width=400\"" ]
[ 430, 4 ]
[ 432, 79 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_title
(self)
Return the title of current playing media.
Return the title of current playing media.
def media_title(self): """Return the title of current playing media.""" return f"Chapter {self._cur_episode}"
[ "def", "media_title", "(", "self", ")", ":", "return", "f\"Chapter {self._cur_episode}\"" ]
[ 435, 4 ]
[ 437, 45 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_series_title
(self)
Return the series title of current playing media (TV Show only).
Return the series title of current playing media (TV Show only).
def media_series_title(self): """Return the series title of current playing media (TV Show only).""" return "House of Cards"
[ "def", "media_series_title", "(", "self", ")", ":", "return", "\"House of Cards\"" ]
[ 440, 4 ]
[ 442, 31 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_season
(self)
Return the season of current playing media (TV Show only).
Return the season of current playing media (TV Show only).
def media_season(self): """Return the season of current playing media (TV Show only).""" return 1
[ "def", "media_season", "(", "self", ")", ":", "return", "1" ]
[ 445, 4 ]
[ 447, 16 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_episode
(self)
Return the episode of current playing media (TV Show only).
Return the episode of current playing media (TV Show only).
def media_episode(self): """Return the episode of current playing media (TV Show only).""" return self._cur_episode
[ "def", "media_episode", "(", "self", ")", ":", "return", "self", ".", "_cur_episode" ]
[ 450, 4 ]
[ 452, 32 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.app_name
(self)
Return the current running application.
Return the current running application.
def app_name(self): """Return the current running application.""" return "Netflix"
[ "def", "app_name", "(", "self", ")", ":", "return", "\"Netflix\"" ]
[ 455, 4 ]
[ 457, 24 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.source
(self)
Return the current input source.
Return the current input source.
def source(self): """Return the current input source.""" return self._source
[ "def", "source", "(", "self", ")", ":", "return", "self", ".", "_source" ]
[ 460, 4 ]
[ 462, 27 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.source_list
(self)
List of available sources.
List of available sources.
def source_list(self): """List of available sources.""" return self._source_list
[ "def", "source_list", "(", "self", ")", ":", "return", "self", ".", "_source_list" ]
[ 465, 4 ]
[ 467, 32 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.supported_features
(self)
Flag media player features that are supported.
Flag media player features that are supported.
def supported_features(self): """Flag media player features that are supported.""" return NETFLIX_PLAYER_SUPPORT
[ "def", "supported_features", "(", "self", ")", ":", "return", "NETFLIX_PLAYER_SUPPORT" ]
[ 470, 4 ]
[ 472, 37 ]
python
en
['en', 'en', 'en']
True
DemoTVShowPlayer.media_previous_track
(self)
Send previous track command.
Send previous track command.
def media_previous_track(self): """Send previous track command.""" if self._cur_episode > 1: self._cur_episode -= 1 self.schedule_update_ha_state()
[ "def", "media_previous_track", "(", "self", ")", ":", "if", "self", ".", "_cur_episode", ">", "1", ":", "self", ".", "_cur_episode", "-=", "1", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 474, 4 ]
[ 478, 43 ]
python
en
['en', 'it', 'en']
True
DemoTVShowPlayer.media_next_track
(self)
Send next track command.
Send next track command.
def media_next_track(self): """Send next track command.""" if self._cur_episode < self._episode_count: self._cur_episode += 1 self.schedule_update_ha_state()
[ "def", "media_next_track", "(", "self", ")", ":", "if", "self", ".", "_cur_episode", "<", "self", ".", "_episode_count", ":", "self", ".", "_cur_episode", "+=", "1", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 480, 4 ]
[ 484, 43 ]
python
en
['en', 'pt', 'en']
True
DemoTVShowPlayer.select_source
(self, source)
Set the input source.
Set the input source.
def select_source(self, source): """Set the input source.""" self._source = source self.schedule_update_ha_state()
[ "def", "select_source", "(", "self", ",", "source", ")", ":", "self", ".", "_source", "=", "source", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 486, 4 ]
[ 489, 39 ]
python
en
['en', 'su', 'en']
True
test_form
(hass, mock_simple_nws_config)
Test we get the form.
Test we get the form.
async def test_form(hass, mock_simple_nws_config): """Test we get the form.""" hass.config.latitude = 35 hass.config.longitude = -90 await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} with patch( "homeassistant.components.nws.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.nws.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"api_key": "test"} ) await hass.async_block_till_done() assert result2["type"] == "create_entry" assert result2["title"] == "ABC" assert result2["data"] == { "api_key": "test", "latitude": 35, "longitude": -90, "station": "ABC", } assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1
[ "async", "def", "test_form", "(", "hass", ",", "mock_simple_nws_config", ")", ":", "hass", ".", "config", ".", "latitude", "=", "35", "hass", ".", "config", ".", "longitude", "=", "-", "90", "await", "setup", ".", "async_setup_component", "(", "hass", ",", "\"persistent_notification\"", ",", "{", "}", ")", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_init", "(", "DOMAIN", ",", "context", "=", "{", "\"source\"", ":", "config_entries", ".", "SOURCE_USER", "}", ")", "assert", "result", "[", "\"type\"", "]", "==", "\"form\"", "assert", "result", "[", "\"errors\"", "]", "==", "{", "}", "with", "patch", "(", "\"homeassistant.components.nws.async_setup\"", ",", "return_value", "=", "True", ")", "as", "mock_setup", ",", "patch", "(", "\"homeassistant.components.nws.async_setup_entry\"", ",", "return_value", "=", "True", ",", ")", "as", "mock_setup_entry", ":", "result2", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_configure", "(", "result", "[", "\"flow_id\"", "]", ",", "{", "\"api_key\"", ":", "\"test\"", "}", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "result2", "[", "\"type\"", "]", "==", "\"create_entry\"", "assert", "result2", "[", "\"title\"", "]", "==", "\"ABC\"", "assert", "result2", "[", "\"data\"", "]", "==", "{", "\"api_key\"", ":", "\"test\"", ",", "\"latitude\"", ":", "35", ",", "\"longitude\"", ":", "-", "90", ",", "\"station\"", ":", "\"ABC\"", ",", "}", "assert", "len", "(", "mock_setup", ".", "mock_calls", ")", "==", "1", "assert", "len", "(", "mock_setup_entry", ".", "mock_calls", ")", "==", "1" ]
[ 9, 0 ]
[ 41, 48 ]
python
en
['en', 'en', 'en']
True
test_form_cannot_connect
(hass, mock_simple_nws_config)
Test we handle cannot connect error.
Test we handle cannot connect error.
async def test_form_cannot_connect(hass, mock_simple_nws_config): """Test we handle cannot connect error.""" mock_instance = mock_simple_nws_config.return_value mock_instance.set_station.side_effect = aiohttp.ClientError result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"api_key": "test"}, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "cannot_connect"}
[ "async", "def", "test_form_cannot_connect", "(", "hass", ",", "mock_simple_nws_config", ")", ":", "mock_instance", "=", "mock_simple_nws_config", ".", "return_value", "mock_instance", ".", "set_station", ".", "side_effect", "=", "aiohttp", ".", "ClientError", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_init", "(", "DOMAIN", ",", "context", "=", "{", "\"source\"", ":", "config_entries", ".", "SOURCE_USER", "}", ")", "result2", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_configure", "(", "result", "[", "\"flow_id\"", "]", ",", "{", "\"api_key\"", ":", "\"test\"", "}", ",", ")", "assert", "result2", "[", "\"type\"", "]", "==", "\"form\"", "assert", "result2", "[", "\"errors\"", "]", "==", "{", "\"base\"", ":", "\"cannot_connect\"", "}" ]
[ 44, 0 ]
[ 59, 58 ]
python
en
['en', 'en', 'en']
True
test_form_unknown_error
(hass, mock_simple_nws_config)
Test we handle unknown error.
Test we handle unknown error.
async def test_form_unknown_error(hass, mock_simple_nws_config): """Test we handle unknown error.""" mock_instance = mock_simple_nws_config.return_value mock_instance.set_station.side_effect = ValueError result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"api_key": "test"}, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "unknown"}
[ "async", "def", "test_form_unknown_error", "(", "hass", ",", "mock_simple_nws_config", ")", ":", "mock_instance", "=", "mock_simple_nws_config", ".", "return_value", "mock_instance", ".", "set_station", ".", "side_effect", "=", "ValueError", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_init", "(", "DOMAIN", ",", "context", "=", "{", "\"source\"", ":", "config_entries", ".", "SOURCE_USER", "}", ")", "result2", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_configure", "(", "result", "[", "\"flow_id\"", "]", ",", "{", "\"api_key\"", ":", "\"test\"", "}", ",", ")", "assert", "result2", "[", "\"type\"", "]", "==", "\"form\"", "assert", "result2", "[", "\"errors\"", "]", "==", "{", "\"base\"", ":", "\"unknown\"", "}" ]
[ 62, 0 ]
[ 77, 51 ]
python
en
['en', 'de', 'en']
True
test_form_already_configured
(hass, mock_simple_nws_config)
Test we handle duplicate entries.
Test we handle duplicate entries.
async def test_form_already_configured(hass, mock_simple_nws_config): """Test we handle duplicate entries.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.nws.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.nws.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"api_key": "test"}, ) await hass.async_block_till_done() assert result2["type"] == "create_entry" assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1 result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.nws.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.nws.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"api_key": "test"}, ) assert result2["type"] == "abort" assert result2["reason"] == "already_configured" await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0 assert len(mock_setup_entry.mock_calls) == 0
[ "async", "def", "test_form_already_configured", "(", "hass", ",", "mock_simple_nws_config", ")", ":", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_init", "(", "DOMAIN", ",", "context", "=", "{", "\"source\"", ":", "config_entries", ".", "SOURCE_USER", "}", ")", "with", "patch", "(", "\"homeassistant.components.nws.async_setup\"", ",", "return_value", "=", "True", ")", "as", "mock_setup", ",", "patch", "(", "\"homeassistant.components.nws.async_setup_entry\"", ",", "return_value", "=", "True", ",", ")", "as", "mock_setup_entry", ":", "result2", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_configure", "(", "result", "[", "\"flow_id\"", "]", ",", "{", "\"api_key\"", ":", "\"test\"", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "result2", "[", "\"type\"", "]", "==", "\"create_entry\"", "assert", "len", "(", "mock_setup", ".", "mock_calls", ")", "==", "1", "assert", "len", "(", "mock_setup_entry", ".", "mock_calls", ")", "==", "1", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_init", "(", "DOMAIN", ",", "context", "=", "{", "\"source\"", ":", "config_entries", ".", "SOURCE_USER", "}", ")", "with", "patch", "(", "\"homeassistant.components.nws.async_setup\"", ",", "return_value", "=", "True", ")", "as", "mock_setup", ",", "patch", "(", "\"homeassistant.components.nws.async_setup_entry\"", ",", "return_value", "=", "True", ",", ")", "as", "mock_setup_entry", ":", "result2", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_configure", "(", "result", "[", "\"flow_id\"", "]", ",", "{", "\"api_key\"", ":", "\"test\"", "}", ",", ")", "assert", "result2", "[", "\"type\"", "]", "==", "\"abort\"", "assert", "result2", "[", "\"reason\"", "]", "==", "\"already_configured\"", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "mock_setup", ".", "mock_calls", ")", "==", "0", "assert", "len", "(", "mock_setup_entry", ".", "mock_calls", ")", "==", "0" ]
[ 80, 0 ]
[ 120, 48 ]
python
en
['fr', 'en', 'en']
True
ModelContainer.check_bounds
(self, theta)
Step 6 check for overlapping periods (within 2.5% arbitrarily chosen)
Step 6 check for overlapping periods (within 2.5% arbitrarily chosen)
def check_bounds(self, theta): for ii in range(0, self.ndim): if not (self.bounds[ii, 0] < theta[ii] < self.bounds[ii, 1]): return False period_storage = [] period_storage_ordered = np.zeros(len(self.ordered_planets)) for model_name, model in self.common_models.items(): if model.model_class == 'planet': """ Step 1: retrieve the planet period""" period = model.transformation['P'](theta, model.fixed, model.variable_index['P']) """ Step 2: save the all planet periods into a list""" period_storage.extend([period]) """ Step 3: save the period of the planet in the ordered list""" if model_name in self.ordered_planets: period_storage_ordered[self.ordered_planets[model_name]] = period # print(' ', model_name, self.ordered_planets[model_name], period_storage_ordered) """ Step 4: check if the eccentricity is within the given range""" if 'e' in model.variable_index: e = model.transformation['e'](theta, model.fixed, model.variable_index['e']) if not model.bounds['e'][0] <= e < model.bounds['e'][1]: # print('eccentricity>1') # print() return False """ Step 5: check if the impact parameter is below 1 + Rp/Rs """ if 'b' in model.variable_index and 'R' in model.variable_index: b = model.transformation['b'](theta, model.fixed, model.variable_index['b']) R = model.transformation['R'](theta, model.fixed, model.variable_index['R']) if not b <= 1 + R: return False """ Step 6 check for overlapping periods (within 2.5% arbitrarily chosen)""" for i_n, i_v in enumerate(period_storage): if i_n == len(period_storage) - 1: break if np.amin(np.abs(period_storage[i_n + 1:] - i_v)) / i_v < 0.025: # print('overlapping periods detected') # print() return False """ Step 7 check if the planet are ordered""" for i_n, i_v in enumerate(period_storage_ordered): if i_n == len(period_storage_ordered) - 1: break if np.amin(period_storage_ordered[i_n + 1:] - i_v) < 0.0: # print('inverted order detected') # print() return False return True
[ "def", "check_bounds", "(", "self", ",", "theta", ")", ":", "for", "ii", "in", "range", "(", "0", ",", "self", ".", "ndim", ")", ":", "if", "not", "(", "self", ".", "bounds", "[", "ii", ",", "0", "]", "<", "theta", "[", "ii", "]", "<", "self", ".", "bounds", "[", "ii", ",", "1", "]", ")", ":", "return", "False", "period_storage", "=", "[", "]", "period_storage_ordered", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "ordered_planets", ")", ")", "for", "model_name", ",", "model", "in", "self", ".", "common_models", ".", "items", "(", ")", ":", "if", "model", ".", "model_class", "==", "'planet'", ":", "\"\"\" Step 1: retrieve the planet period\"\"\"", "period", "=", "model", ".", "transformation", "[", "'P'", "]", "(", "theta", ",", "model", ".", "fixed", ",", "model", ".", "variable_index", "[", "'P'", "]", ")", "\"\"\" Step 2: save the all planet periods into a list\"\"\"", "period_storage", ".", "extend", "(", "[", "period", "]", ")", "\"\"\" Step 3: save the period of the planet in the ordered list\"\"\"", "if", "model_name", "in", "self", ".", "ordered_planets", ":", "period_storage_ordered", "[", "self", ".", "ordered_planets", "[", "model_name", "]", "]", "=", "period", "# print(' ', model_name, self.ordered_planets[model_name], period_storage_ordered)", "\"\"\" Step 4: check if the eccentricity is within the given range\"\"\"", "if", "'e'", "in", "model", ".", "variable_index", ":", "e", "=", "model", ".", "transformation", "[", "'e'", "]", "(", "theta", ",", "model", ".", "fixed", ",", "model", ".", "variable_index", "[", "'e'", "]", ")", "if", "not", "model", ".", "bounds", "[", "'e'", "]", "[", "0", "]", "<=", "e", "<", "model", ".", "bounds", "[", "'e'", "]", "[", "1", "]", ":", "# print('eccentricity>1')", "# print()", "return", "False", "\"\"\" Step 5: check if the impact parameter is below 1 + Rp/Rs \"\"\"", "if", "'b'", "in", "model", ".", "variable_index", "and", "'R'", "in", "model", ".", "variable_index", ":", "b", "=", "model", ".", "transformation", "[", "'b'", "]", "(", "theta", ",", "model", ".", "fixed", ",", "model", ".", "variable_index", "[", "'b'", "]", ")", "R", "=", "model", ".", "transformation", "[", "'R'", "]", "(", "theta", ",", "model", ".", "fixed", ",", "model", ".", "variable_index", "[", "'R'", "]", ")", "if", "not", "b", "<=", "1", "+", "R", ":", "return", "False", "for", "i_n", ",", "i_v", "in", "enumerate", "(", "period_storage", ")", ":", "if", "i_n", "==", "len", "(", "period_storage", ")", "-", "1", ":", "break", "if", "np", ".", "amin", "(", "np", ".", "abs", "(", "period_storage", "[", "i_n", "+", "1", ":", "]", "-", "i_v", ")", ")", "/", "i_v", "<", "0.025", ":", "# print('overlapping periods detected')", "# print()", "return", "False", "\"\"\" Step 7 check if the planet are ordered\"\"\"", "for", "i_n", ",", "i_v", "in", "enumerate", "(", "period_storage_ordered", ")", ":", "if", "i_n", "==", "len", "(", "period_storage_ordered", ")", "-", "1", ":", "break", "if", "np", ".", "amin", "(", "period_storage_ordered", "[", "i_n", "+", "1", ":", "]", "-", "i_v", ")", "<", "0.0", ":", "# print('inverted order detected')", "# print()", "return", "False", "return", "True" ]
[ 138, 4 ]
[ 203, 19 ]
python
en
['en', 'en', 'en']
True
ModelContainer.log_priors_likelihood
(self, theta, return_priors=True)
Constant term added either by dataset.model_logchi2() or gp.log_likelihood()
Constant term added either by dataset.model_logchi2() or gp.log_likelihood()
def log_priors_likelihood(self, theta, return_priors=True):

    log_priors = 0.00
    log_likelihood = 0.00

    """ Constant term added either by dataset.model_logchi2() or gp.log_likelihood() """

    if not self.check_bounds(theta):
        if return_priors is False:
            return -np.inf
        else:
            return -np.inf, -np.inf

    if self.dynamical_model is not None:
        """ check if any keyword ahas get the output model from the dynamical tool
            we must do it here because all the planet are involved"""
        dynamical_output = self.dynamical_model.compute(self, theta)

    for model_name, model in self.common_models.items():
        log_priors += model.return_priors(theta)

    delayed_lnlk_computation = []

    for dataset_name, dataset in self.dataset_dict.items():

        logchi2_gp_model = None

        dataset.model_reset()
        variable_values = dataset.convert(theta)
        dataset.compute(variable_values)

        log_priors += dataset.return_priors(theta)

        if 'none' in dataset.models or 'None' in dataset.models:
            continue
        if not dataset.models:
            continue

        for model_name in dataset.models:

            log_priors += self.models[model_name].return_priors(
                theta, dataset_name)

            variable_values = {}
            for common_ref in self.models[model_name].common_ref:
                variable_values.update(
                    self.common_models[common_ref].convert(theta))

            # try:
            #     """ Taking the parameter values from the common models"""
            #     for common_ref in self.models[model_name].common_ref:
            #         variable_values.update(self.common_models[common_ref].convert(theta))
            # except:
            #     """ This model has no common model reference, i.e., it is strictly connected to the dataset"""
            #     pass

            variable_values.update(
                self.models[model_name].convert(theta, dataset_name))

            """ residuals will be computed following the definition in Dataset class
            """

            if getattr(self.models[model_name], 'internal_likelihood', False):
                logchi2_gp_model = model_name
                continue

            # if getattr(self.models[model_name], 'model_class', None) is 'common_jitter':
            if getattr(self.models[model_name], 'jitter_model', False):
                dataset.jitter += self.models[model_name].compute(
                    variable_values, dataset)
                continue

            if getattr(dataset, 'dynamical', False):
                dataset.external_model = dynamical_output[dataset_name]

            if getattr(self.models[model_name], 'unitary_model', False):
                dataset.unitary_model += self.models[model_name].compute(
                    variable_values, dataset)
                if dataset.normalization_model is None:
                    dataset.normalization_model = np.ones(
                        dataset.n, dtype=np.double)

            elif getattr(self.models[model_name], 'normalization_model', False):
                if dataset.normalization_model is None:
                    dataset.normalization_model = np.ones(
                        dataset.n, dtype=np.double)
                dataset.normalization_model *= self.models[model_name].compute(
                    variable_values, dataset)

            else:
                dataset.additive_model += self.models[model_name].compute(
                    variable_values, dataset)

        dataset.compute_model()
        dataset.compute_residuals()

        """ Gaussian Process check MUST be the last one or the program will fail
            that's because for the GP to work we need to know the _deterministic_ part of the model
            (i.e. the theoretical values you get when you feed your model with the parameter values) """
        if logchi2_gp_model:

            variable_values = {}
            for common_ref in self.models[logchi2_gp_model].common_ref:
                variable_values.update(
                    self.common_models[common_ref].convert(theta))

            variable_values.update(
                self.models[logchi2_gp_model].convert(theta, dataset_name))

            """ GP Log-likelihood is not computed now because a single matrix must be created with
                the joined dataset"""
            if hasattr(self.models[logchi2_gp_model], 'delayed_lnlk_computation'):
                self.models[logchi2_gp_model].add_internal_dataset(
                    variable_values, dataset,
                    reset_status=delayed_lnlk_computation)
                delayed_lnlk_computation.append(logchi2_gp_model)
            else:
                log_likelihood += self.models[logchi2_gp_model].lnlk_compute(
                    variable_values, dataset)
        else:
            log_likelihood += dataset.model_logchi2()

    """ In case there is more than one GP model """
    for logchi2_gp_model in delayed_lnlk_computation:
        log_likelihood += self.models[logchi2_gp_model].lnlk_compute()

    """ check for finite log_priors and log_likelihood"""
    if np.isnan(log_priors) or np.isnan(log_likelihood):
        log_likelihood = -np.inf
        log_priors = -np.inf

    if return_priors is False:
        return log_likelihood
    else:
        return log_priors, log_likelihood
[ "def", "log_priors_likelihood", "(", "self", ",", "theta", ",", "return_priors", "=", "True", ")", ":", "log_priors", "=", "0.00", "log_likelihood", "=", "0.00", "if", "not", "self", ".", "check_bounds", "(", "theta", ")", ":", "if", "return_priors", "is", "False", ":", "return", "-", "np", ".", "inf", "else", ":", "return", "-", "np", ".", "inf", ",", "-", "np", ".", "inf", "if", "self", ".", "dynamical_model", "is", "not", "None", ":", "\"\"\" check if any keyword ahas get the output model from the dynamical tool\n we must do it here because all the planet are involved\"\"\"", "dynamical_output", "=", "self", ".", "dynamical_model", ".", "compute", "(", "self", ",", "theta", ")", "for", "model_name", ",", "model", "in", "self", ".", "common_models", ".", "items", "(", ")", ":", "log_priors", "+=", "model", ".", "return_priors", "(", "theta", ")", "delayed_lnlk_computation", "=", "[", "]", "for", "dataset_name", ",", "dataset", "in", "self", ".", "dataset_dict", ".", "items", "(", ")", ":", "logchi2_gp_model", "=", "None", "dataset", ".", "model_reset", "(", ")", "variable_values", "=", "dataset", ".", "convert", "(", "theta", ")", "dataset", ".", "compute", "(", "variable_values", ")", "log_priors", "+=", "dataset", ".", "return_priors", "(", "theta", ")", "if", "'none'", "in", "dataset", ".", "models", "or", "'None'", "in", "dataset", ".", "models", ":", "continue", "if", "not", "dataset", ".", "models", ":", "continue", "for", "model_name", "in", "dataset", ".", "models", ":", "log_priors", "+=", "self", ".", "models", "[", "model_name", "]", ".", "return_priors", "(", "theta", ",", "dataset_name", ")", "variable_values", "=", "{", "}", "for", "common_ref", "in", "self", ".", "models", "[", "model_name", "]", ".", "common_ref", ":", "variable_values", ".", "update", "(", "self", ".", "common_models", "[", "common_ref", "]", ".", "convert", "(", "theta", ")", ")", "# try:", "# \"\"\" Taking the parameter values from the common models\"\"\"", "# for common_ref in self.models[model_name].common_ref:", "# variable_values.update(self.common_models[common_ref].convert(theta))", "# except:", "# \"\"\" This model has no common model reference, i.e., it is strictly connected to the dataset\"\"\"", "# pass", "variable_values", ".", "update", "(", "self", ".", "models", "[", "model_name", "]", ".", "convert", "(", "theta", ",", "dataset_name", ")", ")", "\"\"\" residuals will be computed following the definition in Dataset class\n \"\"\"", "if", "getattr", "(", "self", ".", "models", "[", "model_name", "]", ",", "'internal_likelihood'", ",", "False", ")", ":", "logchi2_gp_model", "=", "model_name", "continue", "# if getattr(self.models[model_name], 'model_class', None) is 'common_jitter':", "if", "getattr", "(", "self", ".", "models", "[", "model_name", "]", ",", "'jitter_model'", ",", "False", ")", ":", "dataset", ".", "jitter", "+=", "self", ".", "models", "[", "model_name", "]", ".", "compute", "(", "variable_values", ",", "dataset", ")", "continue", "if", "getattr", "(", "dataset", ",", "'dynamical'", ",", "False", ")", ":", "dataset", ".", "external_model", "=", "dynamical_output", "[", "dataset_name", "]", "if", "getattr", "(", "self", ".", "models", "[", "model_name", "]", ",", "'unitary_model'", ",", "False", ")", ":", "dataset", ".", "unitary_model", "+=", "self", ".", "models", "[", "model_name", "]", ".", "compute", "(", "variable_values", ",", "dataset", ")", "if", "dataset", ".", "normalization_model", "is", "None", ":", "dataset", ".", "normalization_model", "=", "np", ".", 
"ones", "(", "dataset", ".", "n", ",", "dtype", "=", "np", ".", "double", ")", "elif", "getattr", "(", "self", ".", "models", "[", "model_name", "]", ",", "'normalization_model'", ",", "False", ")", ":", "if", "dataset", ".", "normalization_model", "is", "None", ":", "dataset", ".", "normalization_model", "=", "np", ".", "ones", "(", "dataset", ".", "n", ",", "dtype", "=", "np", ".", "double", ")", "dataset", ".", "normalization_model", "*=", "self", ".", "models", "[", "model_name", "]", ".", "compute", "(", "variable_values", ",", "dataset", ")", "else", ":", "dataset", ".", "additive_model", "+=", "self", ".", "models", "[", "model_name", "]", ".", "compute", "(", "variable_values", ",", "dataset", ")", "dataset", ".", "compute_model", "(", ")", "dataset", ".", "compute_residuals", "(", ")", "\"\"\" Gaussian Process check MUST be the last one or the program will fail\n that's because for the GP to work we need to know the _deterministic_ part of the model \n (i.e. the theoretical values you get when you feed your model with the parameter values) \"\"\"", "if", "logchi2_gp_model", ":", "variable_values", "=", "{", "}", "for", "common_ref", "in", "self", ".", "models", "[", "logchi2_gp_model", "]", ".", "common_ref", ":", "variable_values", ".", "update", "(", "self", ".", "common_models", "[", "common_ref", "]", ".", "convert", "(", "theta", ")", ")", "variable_values", ".", "update", "(", "self", ".", "models", "[", "logchi2_gp_model", "]", ".", "convert", "(", "theta", ",", "dataset_name", ")", ")", "\"\"\" GP Log-likelihood is not computed now because a single matrix must be created with \n the joined dataset\"\"\"", "if", "hasattr", "(", "self", ".", "models", "[", "logchi2_gp_model", "]", ",", "'delayed_lnlk_computation'", ")", ":", "self", ".", "models", "[", "logchi2_gp_model", "]", ".", "add_internal_dataset", "(", "variable_values", ",", "dataset", ",", "reset_status", "=", "delayed_lnlk_computation", ")", "delayed_lnlk_computation", ".", "append", "(", "logchi2_gp_model", ")", "else", ":", "log_likelihood", "+=", "self", ".", "models", "[", "logchi2_gp_model", "]", ".", "lnlk_compute", "(", "variable_values", ",", "dataset", ")", "else", ":", "log_likelihood", "+=", "dataset", ".", "model_logchi2", "(", ")", "\"\"\" In case there is more than one GP model \"\"\"", "for", "logchi2_gp_model", "in", "delayed_lnlk_computation", ":", "log_likelihood", "+=", "self", ".", "models", "[", "logchi2_gp_model", "]", ".", "lnlk_compute", "(", ")", "\"\"\" check for finite log_priors and log_likelihood\"\"\"", "if", "np", ".", "isnan", "(", "log_priors", ")", "or", "np", ".", "isnan", "(", "log_likelihood", ")", ":", "log_likelihood", "=", "-", "np", ".", "inf", "log_priors", "=", "-", "np", ".", "inf", "if", "return_priors", "is", "False", ":", "return", "log_likelihood", "else", ":", "return", "log_priors", ",", "log_likelihood" ]
[ 213, 4 ]
[ 349, 45 ]
python
en
['en', 'ja', 'th']
False
icloud_bypass_setup_fixture
()
Mock component setup.
Mock component setup.
def icloud_bypass_setup_fixture(): """Mock component setup.""" with patch("homeassistant.components.icloud.async_setup_entry", return_value=True): yield
[ "def", "icloud_bypass_setup_fixture", "(", ")", ":", "with", "patch", "(", "\"homeassistant.components.icloud.async_setup_entry\"", ",", "return_value", "=", "True", ")", ":", "yield" ]
[ 7, 0 ]
[ 10, 13 ]
python
en
['en', 'bg', 'en']
True
setup_platform
(hass, config, add_entities, discovery_info=None)
Set up the Tahoma covers.
Set up the Tahoma covers.
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Tahoma covers.""" if discovery_info is None: return controller = hass.data[TAHOMA_DOMAIN]["controller"] devices = [] for device in hass.data[TAHOMA_DOMAIN]["devices"]["cover"]: devices.append(TahomaCover(device, controller)) add_entities(devices, True)
[ "def", "setup_platform", "(", "hass", ",", "config", ",", "add_entities", ",", "discovery_info", "=", "None", ")", ":", "if", "discovery_info", "is", "None", ":", "return", "controller", "=", "hass", ".", "data", "[", "TAHOMA_DOMAIN", "]", "[", "\"controller\"", "]", "devices", "=", "[", "]", "for", "device", "in", "hass", ".", "data", "[", "TAHOMA_DOMAIN", "]", "[", "\"devices\"", "]", "[", "\"cover\"", "]", ":", "devices", ".", "append", "(", "TahomaCover", "(", "device", ",", "controller", ")", ")", "add_entities", "(", "devices", ",", "True", ")" ]
[ 52, 0 ]
[ 60, 31 ]
python
en
['en', 'en', 'en']
True
TahomaCover.__init__
(self, tahoma_device, controller)
Initialize the device.
Initialize the device.
def __init__(self, tahoma_device, controller): """Initialize the device.""" super().__init__(tahoma_device, controller) self._closure = 0 # 100 equals open self._position = 100 self._closed = False self._rssi_level = None self._icon = None # Can be 0 and bigger self._lock_timer = 0 self._lock_start_ts = None self._lock_end_ts = None # Can be 'comfortLevel1', 'comfortLevel2', 'comfortLevel3', # 'comfortLevel4', 'environmentProtection', 'humanProtection', # 'userLevel1', 'userLevel2' self._lock_level = None # Can be 'LSC', 'SAAC', 'SFC', 'UPS', 'externalGateway', 'localUser', # 'myself', 'rain', 'security', 'temperature', 'timer', 'user', 'wind' self._lock_originator = None
[ "def", "__init__", "(", "self", ",", "tahoma_device", ",", "controller", ")", ":", "super", "(", ")", ".", "__init__", "(", "tahoma_device", ",", "controller", ")", "self", ".", "_closure", "=", "0", "# 100 equals open", "self", ".", "_position", "=", "100", "self", ".", "_closed", "=", "False", "self", ".", "_rssi_level", "=", "None", "self", ".", "_icon", "=", "None", "# Can be 0 and bigger", "self", ".", "_lock_timer", "=", "0", "self", ".", "_lock_start_ts", "=", "None", "self", ".", "_lock_end_ts", "=", "None", "# Can be 'comfortLevel1', 'comfortLevel2', 'comfortLevel3',", "# 'comfortLevel4', 'environmentProtection', 'humanProtection',", "# 'userLevel1', 'userLevel2'", "self", ".", "_lock_level", "=", "None", "# Can be 'LSC', 'SAAC', 'SFC', 'UPS', 'externalGateway', 'localUser',", "# 'myself', 'rain', 'security', 'temperature', 'timer', 'user', 'wind'", "self", ".", "_lock_originator", "=", "None" ]
[ 66, 4 ]
[ 86, 36 ]
python
en
['en', 'en', 'en']
True
TahomaCover.update
(self)
Update method.
Update method.
def update(self): """Update method.""" self.controller.get_states([self.tahoma_device]) # For vertical covers self._closure = self.tahoma_device.active_states.get("core:ClosureState") # For horizontal covers if self._closure is None: self._closure = self.tahoma_device.active_states.get("core:DeploymentState") # For all, if available if "core:PriorityLockTimerState" in self.tahoma_device.active_states: old_lock_timer = self._lock_timer self._lock_timer = self.tahoma_device.active_states[ "core:PriorityLockTimerState" ] # Derive timestamps from _lock_timer, only if not already set or # something has changed if self._lock_timer > 0: _LOGGER.debug("Update %s, lock_timer: %d", self._name, self._lock_timer) if self._lock_start_ts is None: self._lock_start_ts = utcnow() if self._lock_end_ts is None or old_lock_timer != self._lock_timer: self._lock_end_ts = utcnow() + timedelta(seconds=self._lock_timer) else: self._lock_start_ts = None self._lock_end_ts = None else: self._lock_timer = 0 self._lock_start_ts = None self._lock_end_ts = None self._lock_level = self.tahoma_device.active_states.get( "io:PriorityLockLevelState" ) self._lock_originator = self.tahoma_device.active_states.get( "io:PriorityLockOriginatorState" ) self._rssi_level = self.tahoma_device.active_states.get("core:RSSILevelState") # Define which icon to use if self._lock_timer > 0: if self._lock_originator == "wind": self._icon = "mdi:weather-windy" else: self._icon = "mdi:lock-alert" else: self._icon = None # Define current position. # _position: 0 is closed, 100 is fully open. # 'core:ClosureState': 100 is closed, 0 is fully open. if self._closure is not None: if self.tahoma_device.type == HORIZONTAL_AWNING: self._position = self._closure else: self._position = 100 - self._closure if self._position <= 5: self._position = 0 if self._position >= 95: self._position = 100 self._closed = self._position == 0 else: self._position = None if "core:OpenClosedState" in self.tahoma_device.active_states: self._closed = ( self.tahoma_device.active_states["core:OpenClosedState"] == "closed" ) if "core:OpenClosedPartialState" in self.tahoma_device.active_states: self._closed = ( self.tahoma_device.active_states["core:OpenClosedPartialState"] == "closed" ) else: self._closed = False _LOGGER.debug("Update %s, position: %d", self._name, self._position)
[ "def", "update", "(", "self", ")", ":", "self", ".", "controller", ".", "get_states", "(", "[", "self", ".", "tahoma_device", "]", ")", "# For vertical covers", "self", ".", "_closure", "=", "self", ".", "tahoma_device", ".", "active_states", ".", "get", "(", "\"core:ClosureState\"", ")", "# For horizontal covers", "if", "self", ".", "_closure", "is", "None", ":", "self", ".", "_closure", "=", "self", ".", "tahoma_device", ".", "active_states", ".", "get", "(", "\"core:DeploymentState\"", ")", "# For all, if available", "if", "\"core:PriorityLockTimerState\"", "in", "self", ".", "tahoma_device", ".", "active_states", ":", "old_lock_timer", "=", "self", ".", "_lock_timer", "self", ".", "_lock_timer", "=", "self", ".", "tahoma_device", ".", "active_states", "[", "\"core:PriorityLockTimerState\"", "]", "# Derive timestamps from _lock_timer, only if not already set or", "# something has changed", "if", "self", ".", "_lock_timer", ">", "0", ":", "_LOGGER", ".", "debug", "(", "\"Update %s, lock_timer: %d\"", ",", "self", ".", "_name", ",", "self", ".", "_lock_timer", ")", "if", "self", ".", "_lock_start_ts", "is", "None", ":", "self", ".", "_lock_start_ts", "=", "utcnow", "(", ")", "if", "self", ".", "_lock_end_ts", "is", "None", "or", "old_lock_timer", "!=", "self", ".", "_lock_timer", ":", "self", ".", "_lock_end_ts", "=", "utcnow", "(", ")", "+", "timedelta", "(", "seconds", "=", "self", ".", "_lock_timer", ")", "else", ":", "self", ".", "_lock_start_ts", "=", "None", "self", ".", "_lock_end_ts", "=", "None", "else", ":", "self", ".", "_lock_timer", "=", "0", "self", ".", "_lock_start_ts", "=", "None", "self", ".", "_lock_end_ts", "=", "None", "self", ".", "_lock_level", "=", "self", ".", "tahoma_device", ".", "active_states", ".", "get", "(", "\"io:PriorityLockLevelState\"", ")", "self", ".", "_lock_originator", "=", "self", ".", "tahoma_device", ".", "active_states", ".", "get", "(", "\"io:PriorityLockOriginatorState\"", ")", "self", ".", "_rssi_level", "=", "self", ".", "tahoma_device", ".", "active_states", ".", "get", "(", "\"core:RSSILevelState\"", ")", "# Define which icon to use", "if", "self", ".", "_lock_timer", ">", "0", ":", "if", "self", ".", "_lock_originator", "==", "\"wind\"", ":", "self", ".", "_icon", "=", "\"mdi:weather-windy\"", "else", ":", "self", ".", "_icon", "=", "\"mdi:lock-alert\"", "else", ":", "self", ".", "_icon", "=", "None", "# Define current position.", "# _position: 0 is closed, 100 is fully open.", "# 'core:ClosureState': 100 is closed, 0 is fully open.", "if", "self", ".", "_closure", "is", "not", "None", ":", "if", "self", ".", "tahoma_device", ".", "type", "==", "HORIZONTAL_AWNING", ":", "self", ".", "_position", "=", "self", ".", "_closure", "else", ":", "self", ".", "_position", "=", "100", "-", "self", ".", "_closure", "if", "self", ".", "_position", "<=", "5", ":", "self", ".", "_position", "=", "0", "if", "self", ".", "_position", ">=", "95", ":", "self", ".", "_position", "=", "100", "self", ".", "_closed", "=", "self", ".", "_position", "==", "0", "else", ":", "self", ".", "_position", "=", "None", "if", "\"core:OpenClosedState\"", "in", "self", ".", "tahoma_device", ".", "active_states", ":", "self", ".", "_closed", "=", "(", "self", ".", "tahoma_device", ".", "active_states", "[", "\"core:OpenClosedState\"", "]", "==", "\"closed\"", ")", "if", "\"core:OpenClosedPartialState\"", "in", "self", ".", "tahoma_device", ".", "active_states", ":", "self", ".", "_closed", "=", "(", "self", ".", "tahoma_device", ".", "active_states", "[", 
"\"core:OpenClosedPartialState\"", "]", "==", "\"closed\"", ")", "else", ":", "self", ".", "_closed", "=", "False", "_LOGGER", ".", "debug", "(", "\"Update %s, position: %d\"", ",", "self", ".", "_name", ",", "self", ".", "_position", ")" ]
[ 88, 4 ]
[ 166, 76 ]
python
en
['en', 'nl', 'en']
False
TahomaCover.current_cover_position
(self)
Return current position of cover.
Return current position of cover.
def current_cover_position(self): """Return current position of cover.""" return self._position
[ "def", "current_cover_position", "(", "self", ")", ":", "return", "self", ".", "_position" ]
[ 169, 4 ]
[ 171, 29 ]
python
en
['en', 'en', 'en']
True
TahomaCover.set_cover_position
(self, **kwargs)
Move the cover to a specific position.
Move the cover to a specific position.
def set_cover_position(self, **kwargs): """Move the cover to a specific position.""" if self.tahoma_device.type == "io:WindowOpenerVeluxIOComponent": command = "setClosure" else: command = "setPosition" if self.tahoma_device.type == HORIZONTAL_AWNING: self.apply_action(command, kwargs.get(ATTR_POSITION, 0)) else: self.apply_action(command, 100 - kwargs.get(ATTR_POSITION, 0))
[ "def", "set_cover_position", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "tahoma_device", ".", "type", "==", "\"io:WindowOpenerVeluxIOComponent\"", ":", "command", "=", "\"setClosure\"", "else", ":", "command", "=", "\"setPosition\"", "if", "self", ".", "tahoma_device", ".", "type", "==", "HORIZONTAL_AWNING", ":", "self", ".", "apply_action", "(", "command", ",", "kwargs", ".", "get", "(", "ATTR_POSITION", ",", "0", ")", ")", "else", ":", "self", ".", "apply_action", "(", "command", ",", "100", "-", "kwargs", ".", "get", "(", "ATTR_POSITION", ",", "0", ")", ")" ]
[ 173, 4 ]
[ 183, 74 ]
python
en
['en', 'en', 'en']
True
TahomaCover.is_closed
(self)
Return if the cover is closed.
Return if the cover is closed.
def is_closed(self):
    """Return if the cover is closed."""
    return self._closed
[ "def", "is_closed", "(", "self", ")", ":", "return", "self", ".", "_closed" ]
[ 186, 4 ]
[ 188, 27 ]
python
en
['en', 'en', 'en']
True
TahomaCover.device_class
(self)
Return the class of the device.
Return the class of the device.
def device_class(self):
    """Return the class of the device."""
    return TAHOMA_DEVICE_CLASSES.get(self.tahoma_device.type)
[ "def", "device_class", "(", "self", ")", ":", "return", "TAHOMA_DEVICE_CLASSES", ".", "get", "(", "self", ".", "tahoma_device", ".", "type", ")" ]
[ 191, 4 ]
[ 193, 65 ]
python
en
['en', 'en', 'en']
True
TahomaCover.device_state_attributes
(self)
Return the device state attributes.
Return the device state attributes.
def device_state_attributes(self):
    """Return the device state attributes."""
    attr = {}
    super_attr = super().device_state_attributes
    if super_attr is not None:
        attr.update(super_attr)

    if "core:Memorized1PositionState" in self.tahoma_device.active_states:
        attr[ATTR_MEM_POS] = self.tahoma_device.active_states[
            "core:Memorized1PositionState"
        ]
    if self._rssi_level is not None:
        attr[ATTR_RSSI_LEVEL] = self._rssi_level
    if self._lock_start_ts is not None:
        attr[ATTR_LOCK_START_TS] = self._lock_start_ts.isoformat()
    if self._lock_end_ts is not None:
        attr[ATTR_LOCK_END_TS] = self._lock_end_ts.isoformat()
    if self._lock_level is not None:
        attr[ATTR_LOCK_LEVEL] = self._lock_level
    if self._lock_originator is not None:
        attr[ATTR_LOCK_ORIG] = self._lock_originator
    return attr
[ "def", "device_state_attributes", "(", "self", ")", ":", "attr", "=", "{", "}", "super_attr", "=", "super", "(", ")", ".", "device_state_attributes", "if", "super_attr", "is", "not", "None", ":", "attr", ".", "update", "(", "super_attr", ")", "if", "\"core:Memorized1PositionState\"", "in", "self", ".", "tahoma_device", ".", "active_states", ":", "attr", "[", "ATTR_MEM_POS", "]", "=", "self", ".", "tahoma_device", ".", "active_states", "[", "\"core:Memorized1PositionState\"", "]", "if", "self", ".", "_rssi_level", "is", "not", "None", ":", "attr", "[", "ATTR_RSSI_LEVEL", "]", "=", "self", ".", "_rssi_level", "if", "self", ".", "_lock_start_ts", "is", "not", "None", ":", "attr", "[", "ATTR_LOCK_START_TS", "]", "=", "self", ".", "_lock_start_ts", ".", "isoformat", "(", ")", "if", "self", ".", "_lock_end_ts", "is", "not", "None", ":", "attr", "[", "ATTR_LOCK_END_TS", "]", "=", "self", ".", "_lock_end_ts", ".", "isoformat", "(", ")", "if", "self", ".", "_lock_level", "is", "not", "None", ":", "attr", "[", "ATTR_LOCK_LEVEL", "]", "=", "self", ".", "_lock_level", "if", "self", ".", "_lock_originator", "is", "not", "None", ":", "attr", "[", "ATTR_LOCK_ORIG", "]", "=", "self", ".", "_lock_originator", "return", "attr" ]
[ 196, 4 ]
[ 217, 19 ]
python
en
['en', 'en', 'en']
True
TahomaCover.icon
(self)
Return the icon to use in the frontend, if any.
Return the icon to use in the frontend, if any.
def icon(self):
    """Return the icon to use in the frontend, if any."""
    return self._icon
[ "def", "icon", "(", "self", ")", ":", "return", "self", ".", "_icon" ]
[ 220, 4 ]
[ 222, 25 ]
python
en
['en', 'en', 'en']
True
TahomaCover.open_cover
(self, **kwargs)
Open the cover.
Open the cover.
def open_cover(self, **kwargs):
    """Open the cover."""
    self.apply_action("open")
[ "def", "open_cover", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "apply_action", "(", "\"open\"", ")" ]
[ 224, 4 ]
[ 226, 33 ]
python
en
['en', 'en', 'en']
True
TahomaCover.close_cover
(self, **kwargs)
Close the cover.
Close the cover.
def close_cover(self, **kwargs):
    """Close the cover."""
    self.apply_action("close")
[ "def", "close_cover", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "apply_action", "(", "\"close\"", ")" ]
[ 228, 4 ]
[ 230, 34 ]
python
en
['en', 'en', 'en']
True
TahomaCover.stop_cover
(self, **kwargs)
Stop the cover.
Stop the cover.
def stop_cover(self, **kwargs):
    """Stop the cover."""
    if (
        self.tahoma_device.type
        == "io:RollerShutterWithLowSpeedManagementIOComponent"
    ):
        self.apply_action("setPosition", "secured")
    elif self.tahoma_device.type in {
        "io:ExteriorVenetianBlindIOComponent",
        "rts:BlindRTSComponent",
        "rts:DualCurtainRTSComponent",
        "rts:ExteriorVenetianBlindRTSComponent",
        "rts:VenetianBlindRTSComponent",
    }:
        self.apply_action("my")
    elif self.tahoma_device.type in {
        HORIZONTAL_AWNING,
        "io:AwningValanceIOComponent",
        "io:RollerShutterGenericIOComponent",
        "io:VerticalExteriorAwningIOComponent",
        "io:VerticalInteriorBlindVeluxIOComponent",
        "io:WindowOpenerVeluxIOComponent",
    }:
        self.apply_action("stop")
    else:
        self.apply_action("stopIdentify")
[ "def", "stop_cover", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "(", "self", ".", "tahoma_device", ".", "type", "==", "\"io:RollerShutterWithLowSpeedManagementIOComponent\"", ")", ":", "self", ".", "apply_action", "(", "\"setPosition\"", ",", "\"secured\"", ")", "elif", "self", ".", "tahoma_device", ".", "type", "in", "{", "\"io:ExteriorVenetianBlindIOComponent\"", ",", "\"rts:BlindRTSComponent\"", ",", "\"rts:DualCurtainRTSComponent\"", ",", "\"rts:ExteriorVenetianBlindRTSComponent\"", ",", "\"rts:VenetianBlindRTSComponent\"", ",", "}", ":", "self", ".", "apply_action", "(", "\"my\"", ")", "elif", "self", ".", "tahoma_device", ".", "type", "in", "{", "HORIZONTAL_AWNING", ",", "\"io:AwningValanceIOComponent\"", ",", "\"io:RollerShutterGenericIOComponent\"", ",", "\"io:VerticalExteriorAwningIOComponent\"", ",", "\"io:VerticalInteriorBlindVeluxIOComponent\"", ",", "\"io:WindowOpenerVeluxIOComponent\"", ",", "}", ":", "self", ".", "apply_action", "(", "\"stop\"", ")", "else", ":", "self", ".", "apply_action", "(", "\"stopIdentify\"", ")" ]
[ 232, 4 ]
[ 257, 45 ]
python
en
['en', 'en', 'en']
True
setup
(hass, base_config)
Set up the Lutron component.
Set up the Lutron component.
def setup(hass, base_config):
    """Set up the Lutron component."""
    hass.data[LUTRON_BUTTONS] = []
    hass.data[LUTRON_CONTROLLER] = None
    hass.data[LUTRON_DEVICES] = {
        "light": [],
        "cover": [],
        "switch": [],
        "scene": [],
        "binary_sensor": [],
    }

    config = base_config.get(DOMAIN)
    hass.data[LUTRON_CONTROLLER] = Lutron(
        config[CONF_HOST], config[CONF_USERNAME], config[CONF_PASSWORD]
    )

    hass.data[LUTRON_CONTROLLER].load_xml_db()
    hass.data[LUTRON_CONTROLLER].connect()
    _LOGGER.info("Connected to main repeater at %s", config[CONF_HOST])

    # Sort our devices into types
    for area in hass.data[LUTRON_CONTROLLER].areas:
        for output in area.outputs:
            if output.type == "SYSTEM_SHADE":
                hass.data[LUTRON_DEVICES]["cover"].append((area.name, output))
            elif output.is_dimmable:
                hass.data[LUTRON_DEVICES]["light"].append((area.name, output))
            else:
                hass.data[LUTRON_DEVICES]["switch"].append((area.name, output))
        for keypad in area.keypads:
            for button in keypad.buttons:
                # If the button has a function assigned to it, add it as a scene
                if button.name != "Unknown Button" and button.button_type in (
                    "SingleAction",
                    "Toggle",
                    "SingleSceneRaiseLower",
                    "MasterRaiseLower",
                ):
                    # Associate an LED with a button if there is one
                    led = next(
                        (led for led in keypad.leds if led.number == button.number),
                        None,
                    )
                    hass.data[LUTRON_DEVICES]["scene"].append(
                        (area.name, keypad.name, button, led)
                    )

                    hass.data[LUTRON_BUTTONS].append(
                        LutronButton(hass, area.name, keypad, button)
                    )
        if area.occupancy_group is not None:
            hass.data[LUTRON_DEVICES]["binary_sensor"].append(
                (area.name, area.occupancy_group)
            )

    for component in ("light", "cover", "switch", "scene", "binary_sensor"):
        discovery.load_platform(hass, component, DOMAIN, {}, base_config)
    return True
[ "def", "setup", "(", "hass", ",", "base_config", ")", ":", "hass", ".", "data", "[", "LUTRON_BUTTONS", "]", "=", "[", "]", "hass", ".", "data", "[", "LUTRON_CONTROLLER", "]", "=", "None", "hass", ".", "data", "[", "LUTRON_DEVICES", "]", "=", "{", "\"light\"", ":", "[", "]", ",", "\"cover\"", ":", "[", "]", ",", "\"switch\"", ":", "[", "]", ",", "\"scene\"", ":", "[", "]", ",", "\"binary_sensor\"", ":", "[", "]", ",", "}", "config", "=", "base_config", ".", "get", "(", "DOMAIN", ")", "hass", ".", "data", "[", "LUTRON_CONTROLLER", "]", "=", "Lutron", "(", "config", "[", "CONF_HOST", "]", ",", "config", "[", "CONF_USERNAME", "]", ",", "config", "[", "CONF_PASSWORD", "]", ")", "hass", ".", "data", "[", "LUTRON_CONTROLLER", "]", ".", "load_xml_db", "(", ")", "hass", ".", "data", "[", "LUTRON_CONTROLLER", "]", ".", "connect", "(", ")", "_LOGGER", ".", "info", "(", "\"Connected to main repeater at %s\"", ",", "config", "[", "CONF_HOST", "]", ")", "# Sort our devices into types", "for", "area", "in", "hass", ".", "data", "[", "LUTRON_CONTROLLER", "]", ".", "areas", ":", "for", "output", "in", "area", ".", "outputs", ":", "if", "output", ".", "type", "==", "\"SYSTEM_SHADE\"", ":", "hass", ".", "data", "[", "LUTRON_DEVICES", "]", "[", "\"cover\"", "]", ".", "append", "(", "(", "area", ".", "name", ",", "output", ")", ")", "elif", "output", ".", "is_dimmable", ":", "hass", ".", "data", "[", "LUTRON_DEVICES", "]", "[", "\"light\"", "]", ".", "append", "(", "(", "area", ".", "name", ",", "output", ")", ")", "else", ":", "hass", ".", "data", "[", "LUTRON_DEVICES", "]", "[", "\"switch\"", "]", ".", "append", "(", "(", "area", ".", "name", ",", "output", ")", ")", "for", "keypad", "in", "area", ".", "keypads", ":", "for", "button", "in", "keypad", ".", "buttons", ":", "# If the button has a function assigned to it, add it as a scene", "if", "button", ".", "name", "!=", "\"Unknown Button\"", "and", "button", ".", "button_type", "in", "(", "\"SingleAction\"", ",", "\"Toggle\"", ",", "\"SingleSceneRaiseLower\"", ",", "\"MasterRaiseLower\"", ",", ")", ":", "# Associate an LED with a button if there is one", "led", "=", "next", "(", "(", "led", "for", "led", "in", "keypad", ".", "leds", "if", "led", ".", "number", "==", "button", ".", "number", ")", ",", "None", ",", ")", "hass", ".", "data", "[", "LUTRON_DEVICES", "]", "[", "\"scene\"", "]", ".", "append", "(", "(", "area", ".", "name", ",", "keypad", ".", "name", ",", "button", ",", "led", ")", ")", "hass", ".", "data", "[", "LUTRON_BUTTONS", "]", ".", "append", "(", "LutronButton", "(", "hass", ",", "area", ".", "name", ",", "keypad", ",", "button", ")", ")", "if", "area", ".", "occupancy_group", "is", "not", "None", ":", "hass", ".", "data", "[", "LUTRON_DEVICES", "]", "[", "\"binary_sensor\"", "]", ".", "append", "(", "(", "area", ".", "name", ",", "area", ".", "occupancy_group", ")", ")", "for", "component", "in", "(", "\"light\"", ",", "\"cover\"", ",", "\"switch\"", ",", "\"scene\"", ",", "\"binary_sensor\"", ")", ":", "discovery", ".", "load_platform", "(", "hass", ",", "component", ",", "DOMAIN", ",", "{", "}", ",", "base_config", ")", "return", "True" ]
[ 38, 0 ]
[ 96, 15 ]
python
en
['en', 'en', 'en']
True
LutronDevice.__init__
(self, area_name, lutron_device, controller)
Initialize the device.
Initialize the device.
def __init__(self, area_name, lutron_device, controller):
    """Initialize the device."""
    self._lutron_device = lutron_device
    self._controller = controller
    self._area_name = area_name
[ "def", "__init__", "(", "self", ",", "area_name", ",", "lutron_device", ",", "controller", ")", ":", "self", ".", "_lutron_device", "=", "lutron_device", "self", ".", "_controller", "=", "controller", "self", ".", "_area_name", "=", "area_name" ]
[ 102, 4 ]
[ 106, 35 ]
python
en
['en', 'en', 'en']
True
LutronDevice.async_added_to_hass
(self)
Register callbacks.
Register callbacks.
async def async_added_to_hass(self):
    """Register callbacks."""
    self.hass.async_add_executor_job(
        self._lutron_device.subscribe, self._update_callback, None
    )
[ "async", "def", "async_added_to_hass", "(", "self", ")", ":", "self", ".", "hass", ".", "async_add_executor_job", "(", "self", ".", "_lutron_device", ".", "subscribe", ",", "self", ".", "_update_callback", ",", "None", ")" ]
[ 108, 4 ]
[ 112, 9 ]
python
en
['en', 'no', 'en']
False
LutronDevice._update_callback
(self, _device, _context, _event, _params)
Run when invoked by pylutron when the device state changes.
Run when invoked by pylutron when the device state changes.
def _update_callback(self, _device, _context, _event, _params):
    """Run when invoked by pylutron when the device state changes."""
    self.schedule_update_ha_state()
[ "def", "_update_callback", "(", "self", ",", "_device", ",", "_context", ",", "_event", ",", "_params", ")", ":", "self", ".", "schedule_update_ha_state", "(", ")" ]
[ 114, 4 ]
[ 116, 39 ]
python
en
['en', 'en', 'en']
True
LutronDevice.name
(self)
Return the name of the device.
Return the name of the device.
def name(self):
    """Return the name of the device."""
    return f"{self._area_name} {self._lutron_device.name}"
[ "def", "name", "(", "self", ")", ":", "return", "f\"{self._area_name} {self._lutron_device.name}\"" ]
[ 119, 4 ]
[ 121, 62 ]
python
en
['en', 'en', 'en']
True
LutronDevice.should_poll
(self)
No polling needed.
No polling needed.
def should_poll(self):
    """No polling needed."""
    return False
[ "def", "should_poll", "(", "self", ")", ":", "return", "False" ]
[ 124, 4 ]
[ 126, 20 ]
python
en
['en', 'en', 'en']
True
LutronButton.__init__
(self, hass, area_name, keypad, button)
Register callback for activity on the button.
Register callback for activity on the button.
def __init__(self, hass, area_name, keypad, button):
    """Register callback for activity on the button."""
    name = f"{keypad.name}: {button.name}"
    self._hass = hass
    self._has_release_event = (
        button.button_type is not None and "RaiseLower" in button.button_type
    )
    self._id = slugify(name)
    self._keypad = keypad
    self._area_name = area_name
    self._button_name = button.name
    self._button = button
    self._event = "lutron_event"
    self._full_id = slugify(f"{area_name} {keypad.name}: {button.name}")

    button.subscribe(self.button_callback, None)
[ "def", "__init__", "(", "self", ",", "hass", ",", "area_name", ",", "keypad", ",", "button", ")", ":", "name", "=", "f\"{keypad.name}: {button.name}\"", "self", ".", "_hass", "=", "hass", "self", ".", "_has_release_event", "=", "(", "button", ".", "button_type", "is", "not", "None", "and", "\"RaiseLower\"", "in", "button", ".", "button_type", ")", "self", ".", "_id", "=", "slugify", "(", "name", ")", "self", ".", "_keypad", "=", "keypad", "self", ".", "_area_name", "=", "area_name", "self", ".", "_button_name", "=", "button", ".", "name", "self", ".", "_button", "=", "button", "self", ".", "_event", "=", "\"lutron_event\"", "self", ".", "_full_id", "=", "slugify", "(", "f\"{area_name} {keypad.name}: {button.name}\"", ")", "button", ".", "subscribe", "(", "self", ".", "button_callback", ",", "None", ")" ]
[ 137, 4 ]
[ 152, 52 ]
python
en
['en', 'en', 'en']
True
LutronButton.button_callback
(self, button, context, event, params)
Fire an event about a button being pressed or released.
Fire an event about a button being pressed or released.
def button_callback(self, button, context, event, params):
    """Fire an event about a button being pressed or released."""
    # Events per button type:
    # RaiseLower -> pressed/released
    # SingleAction -> single
    action = None
    if self._has_release_event:
        if event == Button.Event.PRESSED:
            action = "pressed"
        else:
            action = "released"
    elif event == Button.Event.PRESSED:
        action = "single"

    if action:
        data = {ATTR_ID: self._id, ATTR_ACTION: action, ATTR_FULL_ID: self._full_id}
        self._hass.bus.fire(self._event, data)
[ "def", "button_callback", "(", "self", ",", "button", ",", "context", ",", "event", ",", "params", ")", ":", "# Events per button type:", "# RaiseLower -> pressed/released", "# SingleAction -> single", "action", "=", "None", "if", "self", ".", "_has_release_event", ":", "if", "event", "==", "Button", ".", "Event", ".", "PRESSED", ":", "action", "=", "\"pressed\"", "else", ":", "action", "=", "\"released\"", "elif", "event", "==", "Button", ".", "Event", ".", "PRESSED", ":", "action", "=", "\"single\"", "if", "action", ":", "data", "=", "{", "ATTR_ID", ":", "self", ".", "_id", ",", "ATTR_ACTION", ":", "action", ",", "ATTR_FULL_ID", ":", "self", ".", "_full_id", "}", "self", ".", "_hass", ".", "bus", ".", "fire", "(", "self", ".", "_event", ",", "data", ")" ]
[ 154, 4 ]
[ 170, 50 ]
python
en
['en', 'en', 'en']
True
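The callback above reduces the (button type, pylutron event) pair to a single action string before firing lutron_event. A dependency-free sketch of that decision table follows; ButtonEvent is a stand-in enum for illustration, not pylutron's real Button.Event class.

```python
# Minimal sketch of the action mapping used above; ButtonEvent is a stand-in
# for pylutron's Button.Event and is only here to make the sketch runnable.
from enum import Enum, auto

class ButtonEvent(Enum):
    PRESSED = auto()
    RELEASED = auto()

def resolve_action(has_release_event: bool, event: ButtonEvent):
    """Return the action string fired with lutron_event, or None."""
    if has_release_event:                    # RaiseLower-style buttons
        return "pressed" if event is ButtonEvent.PRESSED else "released"
    if event is ButtonEvent.PRESSED:         # SingleAction-style buttons
        return "single"
    return None                              # releases of single-action buttons are ignored

assert resolve_action(True, ButtonEvent.RELEASED) == "released"
assert resolve_action(False, ButtonEvent.RELEASED) is None
```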
shift_tokens_right
(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int)
Shift input ids one token to the right.
Shift input ids one token to the right.
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
[ "def", "shift_tokens_right", "(", "input_ids", ":", "torch", ".", "Tensor", ",", "pad_token_id", ":", "int", ",", "decoder_start_token_id", ":", "int", ")", ":", "shifted_input_ids", "=", "input_ids", ".", "new_zeros", "(", "input_ids", ".", "shape", ")", "shifted_input_ids", "[", ":", ",", "1", ":", "]", "=", "input_ids", "[", ":", ",", ":", "-", "1", "]", ".", "clone", "(", ")", "shifted_input_ids", "[", ":", ",", "0", "]", "=", "decoder_start_token_id", "assert", "pad_token_id", "is", "not", "None", ",", "\"self.model.config.pad_token_id has to be defined.\"", "# replace possible -100 values in labels by `pad_token_id`", "shifted_input_ids", ".", "masked_fill_", "(", "shifted_input_ids", "==", "-", "100", ",", "pad_token_id", ")", "return", "shifted_input_ids" ]
[ 58, 0 ]
[ 70, 28 ]
python
en
['en', 'error', 'th']
False
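The helper above prepends the decoder start token and drops the last label position. A quick worked example of its behaviour, assuming shift_tokens_right is in scope as defined above and using illustrative ids (pad_token_id=1, decoder_start_token_id=2, -100 as the ignored-label marker):

```python
import torch

# Illustrative values only: pad_token_id=1, decoder_start_token_id=2,
# -100 marking ignored label positions.
labels = torch.tensor([[5, 6, 7, -100]])
shifted = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
print(shifted)  # tensor([[2, 5, 6, 7]])
```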
_make_causal_mask
(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0)
Make causal mask used for bi-directional self-attention.
Make causal mask used for bi-directional self-attention.
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), float("-inf"))
    mask_cond = torch.arange(mask.size(-1))
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
[ "def", "_make_causal_mask", "(", "input_ids_shape", ":", "torch", ".", "Size", ",", "dtype", ":", "torch", ".", "dtype", ",", "past_key_values_length", ":", "int", "=", "0", ")", ":", "bsz", ",", "tgt_len", "=", "input_ids_shape", "mask", "=", "torch", ".", "full", "(", "(", "tgt_len", ",", "tgt_len", ")", ",", "float", "(", "\"-inf\"", ")", ")", "mask_cond", "=", "torch", ".", "arange", "(", "mask", ".", "size", "(", "-", "1", ")", ")", "mask", ".", "masked_fill_", "(", "mask_cond", "<", "(", "mask_cond", "+", "1", ")", ".", "view", "(", "mask", ".", "size", "(", "-", "1", ")", ",", "1", ")", ",", "0", ")", "mask", "=", "mask", ".", "to", "(", "dtype", ")", "if", "past_key_values_length", ">", "0", ":", "mask", "=", "torch", ".", "cat", "(", "[", "torch", ".", "zeros", "(", "tgt_len", ",", "past_key_values_length", ",", "dtype", "=", "dtype", ")", ",", "mask", "]", ",", "dim", "=", "-", "1", ")", "return", "mask", "[", "None", ",", "None", ",", ":", ",", ":", "]", ".", "expand", "(", "bsz", ",", "1", ",", "tgt_len", ",", "tgt_len", "+", "past_key_values_length", ")" ]
[ 74, 0 ]
[ 86, 91 ]
python
en
['en', 'error', 'th']
False
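For intuition, here is what the mask built above looks like for a short sequence; the dtype and lengths are arbitrary choices for the demonstration, and _make_causal_mask is assumed to be in scope as defined above.

```python
import torch

# Batch of 1, target length 3, no cache: lower triangle (including the
# diagonal) stays 0, everything above it becomes -inf.
mask = _make_causal_mask(torch.Size([1, 3]), torch.float32)
print(mask[0, 0])
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])

# With a cache of length 2, the cached positions are always visible, so the
# mask gains two extra all-zero columns on the left.
mask = _make_causal_mask(torch.Size([1, 3]), torch.float32, past_key_values_length=2)
print(mask.shape)  # torch.Size([1, 1, 3, 5])
```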
_expand_mask
(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
[ "def", "_expand_mask", "(", "mask", ":", "torch", ".", "Tensor", ",", "dtype", ":", "torch", ".", "dtype", ",", "tgt_len", ":", "Optional", "[", "int", "]", "=", "None", ")", ":", "bsz", ",", "src_len", "=", "mask", ".", "size", "(", ")", "tgt_len", "=", "tgt_len", "if", "tgt_len", "is", "not", "None", "else", "src_len", "expanded_mask", "=", "mask", "[", ":", ",", "None", ",", "None", ",", ":", "]", ".", "expand", "(", "bsz", ",", "1", ",", "tgt_len", ",", "src_len", ")", ".", "to", "(", "dtype", ")", "inverted_mask", "=", "1.0", "-", "expanded_mask", "return", "inverted_mask", ".", "masked_fill", "(", "inverted_mask", ".", "bool", "(", ")", ",", "torch", ".", "finfo", "(", "dtype", ")", ".", "min", ")" ]
[ 90, 0 ]
[ 101, 82 ]
python
en
['en', 'error', 'th']
False
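A small check of the expansion above, assuming _expand_mask is in scope as defined: positions marked 0 (padding) in the 2-D attention mask turn into very large negative additive biases in the broadcastable 4-D mask.

```python
import torch

# Batch of 2 sequences; the second one has one padded position.
attention_mask = torch.tensor([[1, 1, 1], [1, 1, 0]])
expanded = _expand_mask(attention_mask, torch.float32)
print(expanded.shape)     # torch.Size([2, 1, 3, 3])
print(expanded[1, 0, 0])  # tensor([ 0.0000e+00,  0.0000e+00, -3.4028e+38])
```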
create_position_ids_from_input_ids
(input_ids, padding_idx, past_key_values_length=0)
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`.
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
[ "def", "create_position_ids_from_input_ids", "(", "input_ids", ",", "padding_idx", ",", "past_key_values_length", "=", "0", ")", ":", "# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.", "mask", "=", "input_ids", ".", "ne", "(", "padding_idx", ")", ".", "int", "(", ")", "incremental_indices", "=", "(", "torch", ".", "cumsum", "(", "mask", ",", "dim", "=", "1", ")", ".", "type_as", "(", "mask", ")", "+", "past_key_values_length", ")", "*", "mask", "return", "incremental_indices", ".", "long", "(", ")", "+", "padding_idx" ]
[ 104, 0 ]
[ 112, 51 ]
python
en
['en', 'error', 'th']
False
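A worked example of the position-id scheme above, assuming the function is in scope as defined; padding_idx=1 is an illustrative choice. Real tokens count up from padding_idx + 1 while padded positions keep the padding index itself.

```python
import torch

input_ids = torch.tensor([[13, 27, 5, 1, 1]])  # last two positions are padding (id 1)
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 1, 1]])
```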
M2M100SinusoidalPositionalEmbedding.get_embedding
(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None)
Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need".
Build sinusoidal embeddings.
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
    """
    Build sinusoidal embeddings.

    This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
    "Attention Is All You Need".
    """
    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
    emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
    if embedding_dim % 2 == 1:
        # zero pad
        emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
    if padding_idx is not None:
        emb[padding_idx, :] = 0
    return emb
[ "def", "get_embedding", "(", "num_embeddings", ":", "int", ",", "embedding_dim", ":", "int", ",", "padding_idx", ":", "Optional", "[", "int", "]", "=", "None", ")", ":", "half_dim", "=", "embedding_dim", "//", "2", "emb", "=", "math", ".", "log", "(", "10000", ")", "/", "(", "half_dim", "-", "1", ")", "emb", "=", "torch", ".", "exp", "(", "torch", ".", "arange", "(", "half_dim", ",", "dtype", "=", "torch", ".", "float", ")", "*", "-", "emb", ")", "emb", "=", "torch", ".", "arange", "(", "num_embeddings", ",", "dtype", "=", "torch", ".", "float", ")", ".", "unsqueeze", "(", "1", ")", "*", "emb", ".", "unsqueeze", "(", "0", ")", "emb", "=", "torch", ".", "cat", "(", "[", "torch", ".", "sin", "(", "emb", ")", ",", "torch", ".", "cos", "(", "emb", ")", "]", ",", "dim", "=", "1", ")", ".", "view", "(", "num_embeddings", ",", "-", "1", ")", "if", "embedding_dim", "%", "2", "==", "1", ":", "# zero pad", "emb", "=", "torch", ".", "cat", "(", "[", "emb", ",", "torch", ".", "zeros", "(", "num_embeddings", ",", "1", ")", "]", ",", "dim", "=", "1", ")", "if", "padding_idx", "is", "not", "None", ":", "emb", "[", "padding_idx", ",", ":", "]", "=", "0", "return", "emb" ]
[ 136, 4 ]
[ 154, 18 ]
python
en
['en', 'error', 'th']
False
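A quick shape and sanity check of the table built above, assuming get_embedding is exposed as a static method (the signature without self suggests it is); the sizes are arbitrary illustration values.

```python
import torch

# 10 positions, embedding dimension 6, padding index 1.
emb = M2M100SinusoidalPositionalEmbedding.get_embedding(10, 6, padding_idx=1)
print(emb.shape)  # torch.Size([10, 6])
print(emb[0])     # position 0: the sin half is all 0, the cos half all 1
print(emb[1])     # the padding row is zeroed out
```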
M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds
(self, inputs_embeds)
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
    """
    We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

    Args:
        inputs_embeds: torch.Tensor

    Returns: torch.Tensor
    """
    input_shape = inputs_embeds.size()[:-1]
    sequence_length = input_shape[1]

    position_ids = torch.arange(
        self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
    )
    return position_ids.unsqueeze(0).expand(input_shape).contiguous()
[ "def", "create_position_ids_from_inputs_embeds", "(", "self", ",", "inputs_embeds", ")", ":", "input_shape", "=", "inputs_embeds", ".", "size", "(", ")", "[", ":", "-", "1", "]", "sequence_length", "=", "input_shape", "[", "1", "]", "position_ids", "=", "torch", ".", "arange", "(", "self", ".", "padding_idx", "+", "1", ",", "sequence_length", "+", "self", ".", "padding_idx", "+", "1", ",", "dtype", "=", "torch", ".", "long", ",", "device", "=", "inputs_embeds", ".", "device", ")", "return", "position_ids", ".", "unsqueeze", "(", "0", ")", ".", "expand", "(", "input_shape", ")", ".", "contiguous", "(", ")" ]
[ 177, 4 ]
[ 192, 73 ]
python
en
['en', 'error', 'th']
False
M2M100Attention.forward
( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, )
Input shape: Batch x Time x Channel
Input shape: Batch x Time x Channel
def forward(
    self,
    hidden_states: torch.Tensor,
    key_value_states: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    attention_mask: Optional[torch.Tensor] = None,
    layer_head_mask: Optional[torch.Tensor] = None,
    output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Input shape: Batch x Time x Channel"""

    # if key_value_states are provided this layer is used as a cross-attention layer
    # for the decoder
    is_cross_attention = key_value_states is not None
    bsz, tgt_len, embed_dim = hidden_states.size()

    # get query proj
    query_states = self.q_proj(hidden_states) * self.scaling
    # get key, value proj
    if is_cross_attention and past_key_value is not None:
        # reuse k,v, cross_attentions
        key_states = past_key_value[0]
        value_states = past_key_value[1]
    elif is_cross_attention:
        # cross_attentions
        key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
        value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
    elif past_key_value is not None:
        # reuse k, v, self_attention
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)
    else:
        # self_attention
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

    if self.is_decoder:
        # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
        # Further calls to cross_attention layer can then reuse all cross-attention
        # key/value_states (first "if" case)
        # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
        # all previous decoder key/value_states. Further calls to uni-directional self-attention
        # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
        # if encoder bi-directional self-attention `past_key_value` is always `None`
        past_key_value = (key_states, value_states)

    proj_shape = (bsz * self.num_heads, -1, self.head_dim)
    query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
    key_states = key_states.view(*proj_shape)
    value_states = value_states.view(*proj_shape)

    src_len = key_states.size(1)
    attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

    assert attn_weights.size() == (
        bsz * self.num_heads,
        tgt_len,
        src_len,
    ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"

    if attention_mask is not None:
        assert attention_mask.size() == (
            bsz,
            1,
            tgt_len,
            src_len,
        ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
        attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
        attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

    attn_weights = F.softmax(attn_weights, dim=-1)

    if layer_head_mask is not None:
        assert layer_head_mask.size() == (
            self.num_heads,
        ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
        attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

    if output_attentions:
        # this operation is a bit awkward, but it's required to
        # make sure that attn_weights keeps its gradient.
        # In order to do so, attn_weights have to be reshaped
        # twice and have to be reused in the following
        attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
    else:
        attn_weights_reshaped = None

    attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)

    attn_output = torch.bmm(attn_probs, value_states)

    assert attn_output.size() == (
        bsz * self.num_heads,
        tgt_len,
        self.head_dim,
    ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"

    attn_output = (
        attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        .transpose(1, 2)
        .reshape(bsz, tgt_len, embed_dim)
    )

    attn_output = self.out_proj(attn_output)

    return attn_output, attn_weights_reshaped, past_key_value
[ "def", "forward", "(", "self", ",", "hidden_states", ":", "torch", ".", "Tensor", ",", "key_value_states", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "past_key_value", ":", "Optional", "[", "Tuple", "[", "torch", ".", "Tensor", "]", "]", "=", "None", ",", "attention_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "layer_head_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "output_attentions", ":", "bool", "=", "False", ",", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "Optional", "[", "torch", ".", "Tensor", "]", ",", "Optional", "[", "Tuple", "[", "torch", ".", "Tensor", "]", "]", "]", ":", "# if key_value_states are provided this layer is used as a cross-attention layer", "# for the decoder", "is_cross_attention", "=", "key_value_states", "is", "not", "None", "bsz", ",", "tgt_len", ",", "embed_dim", "=", "hidden_states", ".", "size", "(", ")", "# get query proj", "query_states", "=", "self", ".", "q_proj", "(", "hidden_states", ")", "*", "self", ".", "scaling", "# get key, value proj", "if", "is_cross_attention", "and", "past_key_value", "is", "not", "None", ":", "# reuse k,v, cross_attentions", "key_states", "=", "past_key_value", "[", "0", "]", "value_states", "=", "past_key_value", "[", "1", "]", "elif", "is_cross_attention", ":", "# cross_attentions", "key_states", "=", "self", ".", "_shape", "(", "self", ".", "k_proj", "(", "key_value_states", ")", ",", "-", "1", ",", "bsz", ")", "value_states", "=", "self", ".", "_shape", "(", "self", ".", "v_proj", "(", "key_value_states", ")", ",", "-", "1", ",", "bsz", ")", "elif", "past_key_value", "is", "not", "None", ":", "# reuse k, v, self_attention", "key_states", "=", "self", ".", "_shape", "(", "self", ".", "k_proj", "(", "hidden_states", ")", ",", "-", "1", ",", "bsz", ")", "value_states", "=", "self", ".", "_shape", "(", "self", ".", "v_proj", "(", "hidden_states", ")", ",", "-", "1", ",", "bsz", ")", "key_states", "=", "torch", ".", "cat", "(", "[", "past_key_value", "[", "0", "]", ",", "key_states", "]", ",", "dim", "=", "2", ")", "value_states", "=", "torch", ".", "cat", "(", "[", "past_key_value", "[", "1", "]", ",", "value_states", "]", ",", "dim", "=", "2", ")", "else", ":", "# self_attention", "key_states", "=", "self", ".", "_shape", "(", "self", ".", "k_proj", "(", "hidden_states", ")", ",", "-", "1", ",", "bsz", ")", "value_states", "=", "self", ".", "_shape", "(", "self", ".", "v_proj", "(", "hidden_states", ")", ",", "-", "1", ",", "bsz", ")", "if", "self", ".", "is_decoder", ":", "# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.", "# Further calls to cross_attention layer can then reuse all cross-attention", "# key/value_states (first \"if\" case)", "# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of", "# all previous decoder key/value_states. 
Further calls to uni-directional self-attention", "# can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)", "# if encoder bi-directional self-attention `past_key_value` is always `None`", "past_key_value", "=", "(", "key_states", ",", "value_states", ")", "proj_shape", "=", "(", "bsz", "*", "self", ".", "num_heads", ",", "-", "1", ",", "self", ".", "head_dim", ")", "query_states", "=", "self", ".", "_shape", "(", "query_states", ",", "tgt_len", ",", "bsz", ")", ".", "view", "(", "*", "proj_shape", ")", "key_states", "=", "key_states", ".", "view", "(", "*", "proj_shape", ")", "value_states", "=", "value_states", ".", "view", "(", "*", "proj_shape", ")", "src_len", "=", "key_states", ".", "size", "(", "1", ")", "attn_weights", "=", "torch", ".", "bmm", "(", "query_states", ",", "key_states", ".", "transpose", "(", "1", ",", "2", ")", ")", "assert", "attn_weights", ".", "size", "(", ")", "==", "(", "bsz", "*", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ",", ")", ",", "f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"", "if", "attention_mask", "is", "not", "None", ":", "assert", "attention_mask", ".", "size", "(", ")", "==", "(", "bsz", ",", "1", ",", "tgt_len", ",", "src_len", ",", ")", ",", "f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"", "attn_weights", "=", "attn_weights", ".", "view", "(", "bsz", ",", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ")", "+", "attention_mask", "attn_weights", "=", "attn_weights", ".", "view", "(", "bsz", "*", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ")", "attn_weights", "=", "F", ".", "softmax", "(", "attn_weights", ",", "dim", "=", "-", "1", ")", "if", "layer_head_mask", "is", "not", "None", ":", "assert", "layer_head_mask", ".", "size", "(", ")", "==", "(", "self", ".", "num_heads", ",", ")", ",", "f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}\"", "attn_weights", "=", "layer_head_mask", ".", "view", "(", "1", ",", "-", "1", ",", "1", ",", "1", ")", "*", "attn_weights", ".", "view", "(", "bsz", ",", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ")", "attn_weights", "=", "attn_weights", ".", "view", "(", "bsz", "*", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ")", "if", "output_attentions", ":", "# this operation is a bit akward, but it's required to", "# make sure that attn_weights keeps its gradient.", "# In order to do so, attn_weights have to reshaped", "# twice and have to be reused in the following", "attn_weights_reshaped", "=", "attn_weights", ".", "view", "(", "bsz", ",", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ")", "attn_weights", "=", "attn_weights_reshaped", ".", "view", "(", "bsz", "*", "self", ".", "num_heads", ",", "tgt_len", ",", "src_len", ")", "else", ":", "attn_weights_reshaped", "=", "None", "attn_probs", "=", "F", ".", "dropout", "(", "attn_weights", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "attn_output", "=", "torch", ".", "bmm", "(", "attn_probs", ",", "value_states", ")", "assert", "attn_output", ".", "size", "(", ")", "==", "(", "bsz", "*", "self", ".", "num_heads", ",", "tgt_len", ",", "self", ".", "head_dim", ",", ")", ",", "f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"", "attn_output", "=", "(", 
"attn_output", ".", "view", "(", "bsz", ",", "self", ".", "num_heads", ",", "tgt_len", ",", "self", ".", "head_dim", ")", ".", "transpose", "(", "1", ",", "2", ")", ".", "reshape", "(", "bsz", ",", "tgt_len", ",", "embed_dim", ")", ")", "attn_output", "=", "self", ".", "out_proj", "(", "attn_output", ")", "return", "attn_output", ",", "attn_weights_reshaped", ",", "past_key_value" ]
[ 226, 4 ]
[ 335, 65 ]
python
en
['en', 'pl', 'en']
True
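The forward pass above folds the attention heads into the batch dimension so both matrix products become plain torch.bmm calls. The standalone sketch below reproduces only that core shape bookkeeping with random tensors; it is not the module itself and omits the q/k/v projections, masking, head masking, dropout and key/value caching.

```python
import torch
import torch.nn.functional as F

# Heads folded into the batch dimension: every matmul is a batched 2-D product.
bsz, num_heads, tgt_len, src_len, head_dim = 2, 4, 5, 7, 8

query = torch.randn(bsz * num_heads, tgt_len, head_dim) * head_dim ** -0.5  # pre-scaled queries
key = torch.randn(bsz * num_heads, src_len, head_dim)
value = torch.randn(bsz * num_heads, src_len, head_dim)

attn_weights = torch.bmm(query, key.transpose(1, 2))   # (bsz*heads, tgt_len, src_len)
attn_probs = F.softmax(attn_weights, dim=-1)
attn_output = torch.bmm(attn_probs, value)              # (bsz*heads, tgt_len, head_dim)

# Un-fold the heads and merge them back into the embedding dimension.
attn_output = (
    attn_output.view(bsz, num_heads, tgt_len, head_dim)
    .transpose(1, 2)
    .reshape(bsz, tgt_len, num_heads * head_dim)
)
print(attn_output.shape)  # torch.Size([2, 5, 32])
```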
M2M100EncoderLayer.forward
( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, )
Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail.
Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail.
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: torch.Tensor,
    layer_head_mask: torch.Tensor,
    output_attentions: bool = False,
):
    """
    Args:
        hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
        attention_mask (:obj:`torch.FloatTensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
            `(config.encoder_attention_heads,)`.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
            returned tensors for more detail.
    """
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    hidden_states, attn_weights, _ = self.self_attn(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        layer_head_mask=layer_head_mask,
        output_attentions=output_attentions,
    )
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states

    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states

    if hidden_states.dtype == torch.float16 and (
        torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
    ):
        clamp_value = torch.finfo(hidden_states.dtype).max - 1000
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

    outputs = (hidden_states,)

    if output_attentions:
        outputs += (attn_weights,)

    return outputs
[ "def", "forward", "(", "self", ",", "hidden_states", ":", "torch", ".", "Tensor", ",", "attention_mask", ":", "torch", ".", "Tensor", ",", "layer_head_mask", ":", "torch", ".", "Tensor", ",", "output_attentions", ":", "bool", "=", "False", ",", ")", ":", "residual", "=", "hidden_states", "hidden_states", "=", "self", ".", "self_attn_layer_norm", "(", "hidden_states", ")", "hidden_states", ",", "attn_weights", ",", "_", "=", "self", ".", "self_attn", "(", "hidden_states", "=", "hidden_states", ",", "attention_mask", "=", "attention_mask", ",", "layer_head_mask", "=", "layer_head_mask", ",", "output_attentions", "=", "output_attentions", ",", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", "residual", "+", "hidden_states", "residual", "=", "hidden_states", "hidden_states", "=", "self", ".", "final_layer_norm", "(", "hidden_states", ")", "hidden_states", "=", "self", ".", "activation_fn", "(", "self", ".", "fc1", "(", "hidden_states", ")", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "activation_dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", "self", ".", "fc2", "(", "hidden_states", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", "residual", "+", "hidden_states", "if", "hidden_states", ".", "dtype", "==", "torch", ".", "float16", "and", "(", "torch", ".", "isinf", "(", "hidden_states", ")", ".", "any", "(", ")", "or", "torch", ".", "isnan", "(", "hidden_states", ")", ".", "any", "(", ")", ")", ":", "clamp_value", "=", "torch", ".", "finfo", "(", "hidden_states", ".", "dtype", ")", ".", "max", "-", "1000", "hidden_states", "=", "torch", ".", "clamp", "(", "hidden_states", ",", "min", "=", "-", "clamp_value", ",", "max", "=", "clamp_value", ")", "outputs", "=", "(", "hidden_states", ",", ")", "if", "output_attentions", ":", "outputs", "+=", "(", "attn_weights", ",", ")", "return", "outputs" ]
[ 356, 4 ]
[ 404, 22 ]
python
en
['en', 'error', 'th']
False
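The encoder layer above follows a pre-LayerNorm residual pattern: normalize, run the sub-layer, apply dropout, add the residual, once for self-attention and once for the feed-forward block. A compact, self-contained sketch of that skeleton follows; it uses nn.MultiheadAttention as a stand-in sub-layer and deliberately ignores attention masks, head masks and the float16 clamp.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class PreNormBlockSketch(nn.Module):
    """Skeleton of the pre-LayerNorm residual pattern used above (no masks, no fp16 clamp)."""

    def __init__(self, d_model: int, d_ff: int, dropout: float = 0.1):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, num_heads=4, batch_first=True)  # stand-in self-attention
        self.attn_norm = nn.LayerNorm(d_model)
        self.final_norm = nn.LayerNorm(d_model)
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        self.dropout = dropout

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        h = self.attn_norm(x)              # normalize *before* the sub-layer
        h, _ = self.attn(h, h, h)
        x = residual + F.dropout(h, p=self.dropout, training=self.training)

        residual = x
        h = self.final_norm(x)
        h = self.fc2(F.relu(self.fc1(h)))
        return residual + F.dropout(h, p=self.dropout, training=self.training)

block = PreNormBlockSketch(d_model=16, d_ff=64)
print(block(torch.randn(2, 10, 16)).shape)  # torch.Size([2, 10, 16])
```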
M2M100DecoderLayer.forward
( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, encoder_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, )
Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of size `(config.encoder_attention_heads,)`. past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail.
Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of size `(config.encoder_attention_heads,)`. past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail.
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    encoder_hidden_states: Optional[torch.Tensor] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
    layer_head_mask: Optional[torch.Tensor] = None,
    encoder_layer_head_mask: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = True,
):
    """
    Args:
        hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
        attention_mask (:obj:`torch.FloatTensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
        encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
            `(config.encoder_attention_heads,)`.
        encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
            size `(config.encoder_attention_heads,)`.
        past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
            returned tensors for more detail.
    """
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)

    # Self Attention
    # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
    # add present self-attn cache to positions 1,2 of present_key_value tuple
    hidden_states, self_attn_weights, present_key_value = self.self_attn(
        hidden_states=hidden_states,
        past_key_value=self_attn_past_key_value,
        attention_mask=attention_mask,
        layer_head_mask=layer_head_mask,
        output_attentions=output_attentions,
    )
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states

    # Cross-Attention Block
    cross_attn_present_key_value = None
    cross_attn_weights = None
    if encoder_hidden_states is not None:
        residual = hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)

        # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
        hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
            hidden_states=hidden_states,
            key_value_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            layer_head_mask=layer_head_mask,
            past_key_value=cross_attn_past_key_value,
            output_attentions=output_attentions,
        )
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # add cross-attn to positions 3,4 of present_key_value tuple
        present_key_value = present_key_value + cross_attn_present_key_value

    # Fully Connected
    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states

    outputs = (hidden_states,)

    if output_attentions:
        outputs += (self_attn_weights, cross_attn_weights)

    if use_cache:
        outputs += (present_key_value,)

    return outputs
[ "def", "forward", "(", "self", ",", "hidden_states", ":", "torch", ".", "Tensor", ",", "attention_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "encoder_hidden_states", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "encoder_attention_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "layer_head_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "encoder_layer_head_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", "past_key_value", ":", "Optional", "[", "Tuple", "[", "torch", ".", "Tensor", "]", "]", "=", "None", ",", "output_attentions", ":", "Optional", "[", "bool", "]", "=", "False", ",", "use_cache", ":", "Optional", "[", "bool", "]", "=", "True", ",", ")", ":", "residual", "=", "hidden_states", "hidden_states", "=", "self", ".", "self_attn_layer_norm", "(", "hidden_states", ")", "# Self Attention", "# decoder uni-directional self-attention cached key/values tuple is at positions 1,2", "self_attn_past_key_value", "=", "past_key_value", "[", ":", "2", "]", "if", "past_key_value", "is", "not", "None", "else", "None", "# add present self-attn cache to positions 1,2 of present_key_value tuple", "hidden_states", ",", "self_attn_weights", ",", "present_key_value", "=", "self", ".", "self_attn", "(", "hidden_states", "=", "hidden_states", ",", "past_key_value", "=", "self_attn_past_key_value", ",", "attention_mask", "=", "attention_mask", ",", "layer_head_mask", "=", "layer_head_mask", ",", "output_attentions", "=", "output_attentions", ",", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", "residual", "+", "hidden_states", "# Cross-Attention Block", "cross_attn_present_key_value", "=", "None", "cross_attn_weights", "=", "None", "if", "encoder_hidden_states", "is", "not", "None", ":", "residual", "=", "hidden_states", "hidden_states", "=", "self", ".", "encoder_attn_layer_norm", "(", "hidden_states", ")", "# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple", "cross_attn_past_key_value", "=", "past_key_value", "[", "-", "2", ":", "]", "if", "past_key_value", "is", "not", "None", "else", "None", "hidden_states", ",", "cross_attn_weights", ",", "cross_attn_present_key_value", "=", "self", ".", "encoder_attn", "(", "hidden_states", "=", "hidden_states", ",", "key_value_states", "=", "encoder_hidden_states", ",", "attention_mask", "=", "encoder_attention_mask", ",", "layer_head_mask", "=", "layer_head_mask", ",", "past_key_value", "=", "cross_attn_past_key_value", ",", "output_attentions", "=", "output_attentions", ",", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", "residual", "+", "hidden_states", "# add cross-attn to positions 3,4 of present_key_value tuple", "present_key_value", "=", "present_key_value", "+", "cross_attn_present_key_value", "# Fully Connected", "residual", "=", "hidden_states", "hidden_states", "=", "self", ".", "final_layer_norm", "(", "hidden_states", ")", "hidden_states", "=", "self", ".", "activation_fn", "(", "self", ".", "fc1", "(", "hidden_states", ")", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "activation_dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", 
"self", ".", "fc2", "(", "hidden_states", ")", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "hidden_states", "=", "residual", "+", "hidden_states", "outputs", "=", "(", "hidden_states", ",", ")", "if", "output_attentions", ":", "outputs", "+=", "(", "self_attn_weights", ",", "cross_attn_weights", ")", "if", "use_cache", ":", "outputs", "+=", "(", "present_key_value", ",", ")", "return", "outputs" ]
[ 435, 4 ]
[ 521, 22 ]
python
en
['en', 'error', 'th']
False
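As the comments above note, each decoder layer's cache tuple keeps the self-attention key/values in its first two slots and the cross-attention key/values in the last two. A toy illustration of how such a tuple is sliced and rebuilt during one decoding step, using dummy tensors and made-up shapes:

```python
import torch

# Dummy per-layer cache with the layout described above:
# (self_k, self_v, cross_k, cross_v), each of shape (bsz, heads, seq_len, head_dim).
bsz, heads, head_dim = 1, 4, 8
past_key_value = (
    torch.randn(bsz, heads, 3, head_dim),  # self-attn keys for the 3 tokens decoded so far
    torch.randn(bsz, heads, 3, head_dim),  # self-attn values
    torch.randn(bsz, heads, 7, head_dim),  # cross-attn keys (encoder length 7, fixed)
    torch.randn(bsz, heads, 7, head_dim),  # cross-attn values
)

self_attn_past = past_key_value[:2]    # grows by one position every decoding step
cross_attn_past = past_key_value[-2:]  # computed once, reused unchanged

new_k = torch.randn(bsz, heads, 1, head_dim)
new_v = torch.randn(bsz, heads, 1, head_dim)
updated_self = (
    torch.cat([self_attn_past[0], new_k], dim=2),
    torch.cat([self_attn_past[1], new_v], dim=2),
)
present_key_value = updated_self + cross_attn_past
print([t.shape[2] for t in present_key_value])  # [4, 4, 7, 7]
```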
M2M100Encoder.forward
( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, )
r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.M2M100Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.
def forward(
    self,
    input_ids=None,
    attention_mask=None,
    head_mask=None,
    inputs_embeds=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
            provide it.

            Indices can be obtained using :class:`~transformers.M2M100Tokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
            for details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
            representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
            into associated vectors than the model's internal embedding lookup matrix.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
            returned tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
            for more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # retrieve input_ids and inputs_embeds
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

    embed_pos = self.embed_positions(input_ids, inputs_embeds)

    hidden_states = inputs_embeds + embed_pos
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)

    # expand attention_mask
    if attention_mask is not None:
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)

    encoder_states = () if output_hidden_states else None
    all_attentions = () if output_attentions else None

    # check if head_mask has a correct number of layers specified if desired
    if head_mask is not None:
        assert head_mask.size()[0] == (
            len(self.layers)
        ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
    for idx, encoder_layer in enumerate(self.layers):
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
        dropout_probability = random.uniform(0, 1)
        if self.training and (dropout_probability < self.layerdrop):  # skip the layer
            layer_outputs = (None, None)
        else:
            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    (head_mask[idx] if head_mask is not None else None),
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

        if output_attentions:
            all_attentions = all_attentions + (layer_outputs[1],)

    hidden_states = self.layer_norm(hidden_states)

    if output_hidden_states:
        encoder_states = encoder_states + (hidden_states,)

    if not return_dict:
        return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
    return BaseModelOutput(
        last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
    )
[ "def", "forward", "(", "self", ",", "input_ids", "=", "None", ",", "attention_mask", "=", "None", ",", "head_mask", "=", "None", ",", "inputs_embeds", "=", "None", ",", "output_attentions", "=", "None", ",", "output_hidden_states", "=", "None", ",", "return_dict", "=", "None", ",", ")", ":", "output_attentions", "=", "output_attentions", "if", "output_attentions", "is", "not", "None", "else", "self", ".", "config", ".", "output_attentions", "output_hidden_states", "=", "(", "output_hidden_states", "if", "output_hidden_states", "is", "not", "None", "else", "self", ".", "config", ".", "output_hidden_states", ")", "return_dict", "=", "return_dict", "if", "return_dict", "is", "not", "None", "else", "self", ".", "config", ".", "use_return_dict", "# retrieve input_ids and inputs_embeds", "if", "input_ids", "is", "not", "None", "and", "inputs_embeds", "is", "not", "None", ":", "raise", "ValueError", "(", "\"You cannot specify both input_ids and inputs_embeds at the same time\"", ")", "elif", "input_ids", "is", "not", "None", ":", "input_shape", "=", "input_ids", ".", "size", "(", ")", "input_ids", "=", "input_ids", ".", "view", "(", "-", "1", ",", "input_shape", "[", "-", "1", "]", ")", "elif", "inputs_embeds", "is", "not", "None", ":", "input_shape", "=", "inputs_embeds", ".", "size", "(", ")", "[", ":", "-", "1", "]", "else", ":", "raise", "ValueError", "(", "\"You have to specify either input_ids or inputs_embeds\"", ")", "if", "inputs_embeds", "is", "None", ":", "inputs_embeds", "=", "self", ".", "embed_tokens", "(", "input_ids", ")", "*", "self", ".", "embed_scale", "embed_pos", "=", "self", ".", "embed_positions", "(", "input_ids", ",", "inputs_embeds", ")", "hidden_states", "=", "inputs_embeds", "+", "embed_pos", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", "=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "# expand attention_mask", "if", "attention_mask", "is", "not", "None", ":", "# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]", "attention_mask", "=", "_expand_mask", "(", "attention_mask", ",", "inputs_embeds", ".", "dtype", ")", "encoder_states", "=", "(", ")", "if", "output_hidden_states", "else", "None", "all_attentions", "=", "(", ")", "if", "output_attentions", "else", "None", "# check if head_mask has a correct number of layers specified if desired", "if", "head_mask", "is", "not", "None", ":", "assert", "head_mask", ".", "size", "(", ")", "[", "0", "]", "==", "(", "len", "(", "self", ".", "layers", ")", ")", ",", "f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"", "for", "idx", ",", "encoder_layer", "in", "enumerate", "(", "self", ".", "layers", ")", ":", "if", "output_hidden_states", ":", "encoder_states", "=", "encoder_states", "+", "(", "hidden_states", ",", ")", "# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)", "dropout_probability", "=", "random", ".", "uniform", "(", "0", ",", "1", ")", "if", "self", ".", "training", "and", "(", "dropout_probability", "<", "self", ".", "layerdrop", ")", ":", "# skip the layer", "layer_outputs", "=", "(", "None", ",", "None", ")", "else", ":", "if", "getattr", "(", "self", ".", "config", ",", "\"gradient_checkpointing\"", ",", "False", ")", "and", "self", ".", "training", ":", "def", "create_custom_forward", "(", "module", ")", ":", "def", "custom_forward", "(", "*", "inputs", ")", ":", "return", "module", "(", "*", "inputs", ",", "output_attentions", ")", "return", "custom_forward", 
"layer_outputs", "=", "torch", ".", "utils", ".", "checkpoint", ".", "checkpoint", "(", "create_custom_forward", "(", "encoder_layer", ")", ",", "hidden_states", ",", "attention_mask", ",", "(", "head_mask", "[", "idx", "]", "if", "head_mask", "is", "not", "None", "else", "None", ")", ",", ")", "else", ":", "layer_outputs", "=", "encoder_layer", "(", "hidden_states", ",", "attention_mask", ",", "layer_head_mask", "=", "(", "head_mask", "[", "idx", "]", "if", "head_mask", "is", "not", "None", "else", "None", ")", ",", "output_attentions", "=", "output_attentions", ",", ")", "hidden_states", "=", "layer_outputs", "[", "0", "]", "if", "output_attentions", ":", "all_attentions", "=", "all_attentions", "+", "(", "layer_outputs", "[", "1", "]", ",", ")", "hidden_states", "=", "self", ".", "layer_norm", "(", "hidden_states", ")", "if", "output_hidden_states", ":", "encoder_states", "=", "encoder_states", "+", "(", "hidden_states", ",", ")", "if", "not", "return_dict", ":", "return", "tuple", "(", "v", "for", "v", "in", "[", "hidden_states", ",", "encoder_states", ",", "all_attentions", "]", "if", "v", "is", "not", "None", ")", "return", "BaseModelOutput", "(", "last_hidden_state", "=", "hidden_states", ",", "hidden_states", "=", "encoder_states", ",", "attentions", "=", "all_attentions", ")" ]
[ 673, 4 ]
[ 796, 9 ]
python
cy
['en', 'cy', 'hi']
False
M2M100Decoder.forward
( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, )
r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.M2M100Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.
def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.M2M100Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(self.device) if attention_mask is not None and combined_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = combined_attention_mask + _expand_mask( attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, combined_attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, encoder_head_mask[idx] if encoder_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, )
[ "def", "forward", "(", "self", ",", "input_ids", "=", "None", ",", "attention_mask", "=", "None", ",", "encoder_hidden_states", "=", "None", ",", "encoder_attention_mask", "=", "None", ",", "head_mask", "=", "None", ",", "encoder_head_mask", "=", "None", ",", "past_key_values", "=", "None", ",", "inputs_embeds", "=", "None", ",", "use_cache", "=", "None", ",", "output_attentions", "=", "None", ",", "output_hidden_states", "=", "None", ",", "return_dict", "=", "None", ",", ")", ":", "output_attentions", "=", "output_attentions", "if", "output_attentions", "is", "not", "None", "else", "self", ".", "config", ".", "output_attentions", "output_hidden_states", "=", "(", "output_hidden_states", "if", "output_hidden_states", "is", "not", "None", "else", "self", ".", "config", ".", "output_hidden_states", ")", "use_cache", "=", "use_cache", "if", "use_cache", "is", "not", "None", "else", "self", ".", "config", ".", "use_cache", "return_dict", "=", "return_dict", "if", "return_dict", "is", "not", "None", "else", "self", ".", "config", ".", "use_return_dict", "# retrieve input_ids and inputs_embeds", "if", "input_ids", "is", "not", "None", "and", "inputs_embeds", "is", "not", "None", ":", "raise", "ValueError", "(", "\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\"", ")", "elif", "input_ids", "is", "not", "None", ":", "input_shape", "=", "input_ids", ".", "size", "(", ")", "input_ids", "=", "input_ids", ".", "view", "(", "-", "1", ",", "input_shape", "[", "-", "1", "]", ")", "elif", "inputs_embeds", "is", "not", "None", ":", "input_shape", "=", "inputs_embeds", ".", "size", "(", ")", "[", ":", "-", "1", "]", "else", ":", "raise", "ValueError", "(", "\"You have to specify either decoder_input_ids or decoder_inputs_embeds\"", ")", "# past_key_values_length", "past_key_values_length", "=", "past_key_values", "[", "0", "]", "[", "0", "]", ".", "shape", "[", "2", "]", "if", "past_key_values", "is", "not", "None", "else", "0", "if", "inputs_embeds", "is", "None", ":", "inputs_embeds", "=", "self", ".", "embed_tokens", "(", "input_ids", ")", "*", "self", ".", "embed_scale", "# create causal mask", "# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]", "combined_attention_mask", "=", "None", "if", "input_shape", "[", "-", "1", "]", ">", "1", ":", "combined_attention_mask", "=", "_make_causal_mask", "(", "input_shape", ",", "inputs_embeds", ".", "dtype", ",", "past_key_values_length", "=", "past_key_values_length", ")", ".", "to", "(", "self", ".", "device", ")", "if", "attention_mask", "is", "not", "None", "and", "combined_attention_mask", "is", "not", "None", ":", "# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]", "combined_attention_mask", "=", "combined_attention_mask", "+", "_expand_mask", "(", "attention_mask", ",", "inputs_embeds", ".", "dtype", ",", "tgt_len", "=", "input_shape", "[", "-", "1", "]", ")", "# expand encoder attention mask", "if", "encoder_hidden_states", "is", "not", "None", "and", "encoder_attention_mask", "is", "not", "None", ":", "# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]", "encoder_attention_mask", "=", "_expand_mask", "(", "encoder_attention_mask", ",", "inputs_embeds", ".", "dtype", ",", "tgt_len", "=", "input_shape", "[", "-", "1", "]", ")", "# embed positions", "positions", "=", "self", ".", "embed_positions", "(", "input_ids", ",", "inputs_embeds", ",", "past_key_values_length", ")", "hidden_states", "=", "inputs_embeds", "+", "positions", "hidden_states", "=", "F", ".", "dropout", "(", "hidden_states", ",", "p", 
"=", "self", ".", "dropout", ",", "training", "=", "self", ".", "training", ")", "# decoder layers", "all_hidden_states", "=", "(", ")", "if", "output_hidden_states", "else", "None", "all_self_attns", "=", "(", ")", "if", "output_attentions", "else", "None", "all_cross_attentions", "=", "(", ")", "if", "output_attentions", "else", "None", "next_decoder_cache", "=", "(", ")", "if", "use_cache", "else", "None", "# check if head_mask has a correct number of layers specified if desired", "if", "head_mask", "is", "not", "None", ":", "assert", "head_mask", ".", "size", "(", ")", "[", "0", "]", "==", "(", "len", "(", "self", ".", "layers", ")", ")", ",", "f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"", "for", "idx", ",", "decoder_layer", "in", "enumerate", "(", "self", ".", "layers", ")", ":", "# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)", "if", "output_hidden_states", ":", "all_hidden_states", "+=", "(", "hidden_states", ",", ")", "dropout_probability", "=", "random", ".", "uniform", "(", "0", ",", "1", ")", "if", "self", ".", "training", "and", "(", "dropout_probability", "<", "self", ".", "layerdrop", ")", ":", "continue", "past_key_value", "=", "past_key_values", "[", "idx", "]", "if", "past_key_values", "is", "not", "None", "else", "None", "if", "getattr", "(", "self", ".", "config", ",", "\"gradient_checkpointing\"", ",", "False", ")", "and", "self", ".", "training", ":", "if", "use_cache", ":", "logger", ".", "warn", "(", "\"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"", "\"`use_cache=False`...\"", ")", "use_cache", "=", "False", "def", "create_custom_forward", "(", "module", ")", ":", "def", "custom_forward", "(", "*", "inputs", ")", ":", "# None for past_key_value", "return", "module", "(", "*", "inputs", ",", "output_attentions", ",", "use_cache", ")", "return", "custom_forward", "layer_outputs", "=", "torch", ".", "utils", ".", "checkpoint", ".", "checkpoint", "(", "create_custom_forward", "(", "decoder_layer", ")", ",", "hidden_states", ",", "combined_attention_mask", ",", "encoder_hidden_states", ",", "encoder_attention_mask", ",", "head_mask", "[", "idx", "]", "if", "head_mask", "is", "not", "None", "else", "None", ",", "encoder_head_mask", "[", "idx", "]", "if", "encoder_head_mask", "is", "not", "None", "else", "None", ",", "None", ",", ")", "else", ":", "layer_outputs", "=", "decoder_layer", "(", "hidden_states", ",", "attention_mask", "=", "combined_attention_mask", ",", "encoder_hidden_states", "=", "encoder_hidden_states", ",", "encoder_attention_mask", "=", "encoder_attention_mask", ",", "layer_head_mask", "=", "(", "head_mask", "[", "idx", "]", "if", "head_mask", "is", "not", "None", "else", "None", ")", ",", "encoder_layer_head_mask", "=", "(", "encoder_head_mask", "[", "idx", "]", "if", "encoder_head_mask", "is", "not", "None", "else", "None", ")", ",", "past_key_value", "=", "past_key_value", ",", "output_attentions", "=", "output_attentions", ",", "use_cache", "=", "use_cache", ",", ")", "hidden_states", "=", "layer_outputs", "[", "0", "]", "if", "use_cache", ":", "next_decoder_cache", "+=", "(", "layer_outputs", "[", "3", "if", "output_attentions", "else", "1", "]", ",", ")", "if", "output_attentions", ":", "all_self_attns", "+=", "(", "layer_outputs", "[", "1", "]", ",", ")", "all_cross_attentions", "+=", "(", "layer_outputs", "[", "2", "]", ",", ")", "hidden_states", "=", "self", ".", "layer_norm", "(", "hidden_states", ")", "# add hidden 
states from the last decoder layer", "if", "output_hidden_states", ":", "all_hidden_states", "+=", "(", "hidden_states", ",", ")", "next_cache", "=", "next_decoder_cache", "if", "use_cache", "else", "None", "if", "not", "return_dict", ":", "return", "tuple", "(", "v", "for", "v", "in", "[", "hidden_states", ",", "next_cache", ",", "all_hidden_states", ",", "all_self_attns", ",", "all_cross_attentions", "]", "if", "v", "is", "not", "None", ")", "return", "BaseModelOutputWithPastAndCrossAttentions", "(", "last_hidden_state", "=", "hidden_states", ",", "past_key_values", "=", "next_cache", ",", "hidden_states", "=", "all_hidden_states", ",", "attentions", "=", "all_self_attns", ",", "cross_attentions", "=", "all_cross_attentions", ",", ")" ]
[ 831, 4 ]
[ 1034, 9 ]
python
cy
['en', 'cy', 'hi']
False
device_reg
(hass)
Return an empty, loaded, registry.
Return an empty, loaded, registry.
def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass)
[ "def", "device_reg", "(", "hass", ")", ":", "return", "mock_device_registry", "(", "hass", ")" ]
[ 24, 0 ]
[ 26, 37 ]
python
en
['en', 'fy', 'en']
True
entity_reg
(hass)
Return an empty, loaded, registry.
Return an empty, loaded, registry.
def entity_reg(hass): """Return an empty, loaded, registry.""" return mock_registry(hass)
[ "def", "entity_reg", "(", "hass", ")", ":", "return", "mock_registry", "(", "hass", ")" ]
[ 30, 0 ]
[ 32, 30 ]
python
en
['en', 'fy', 'en']
True
calls
(hass)
Track calls to a mock service.
Track calls to a mock service.
def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation")
[ "def", "calls", "(", "hass", ")", ":", "return", "async_mock_service", "(", "hass", ",", "\"test\"", ",", "\"automation\"", ")" ]
[ 36, 0 ]
[ 38, 57 ]
python
en
['en', 'en', 'en']
True
test_get_conditions
(hass, device_reg, entity_reg)
Test we get the expected conditions from a remote.
Test we get the expected conditions from a remote.
async def test_get_conditions(hass, device_reg, entity_reg): """Test we get the expected conditions from a remote.""" config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id) expected_conditions = [ { "condition": "device", "domain": DOMAIN, "type": "is_off", "device_id": device_entry.id, "entity_id": f"{DOMAIN}.test_5678", }, { "condition": "device", "domain": DOMAIN, "type": "is_on", "device_id": device_entry.id, "entity_id": f"{DOMAIN}.test_5678", }, ] conditions = await async_get_device_automations(hass, "condition", device_entry.id) assert conditions == expected_conditions
[ "async", "def", "test_get_conditions", "(", "hass", ",", "device_reg", ",", "entity_reg", ")", ":", "config_entry", "=", "MockConfigEntry", "(", "domain", "=", "\"test\"", ",", "data", "=", "{", "}", ")", "config_entry", ".", "add_to_hass", "(", "hass", ")", "device_entry", "=", "device_reg", ".", "async_get_or_create", "(", "config_entry_id", "=", "config_entry", ".", "entry_id", ",", "connections", "=", "{", "(", "device_registry", ".", "CONNECTION_NETWORK_MAC", ",", "\"12:34:56:AB:CD:EF\"", ")", "}", ",", ")", "entity_reg", ".", "async_get_or_create", "(", "DOMAIN", ",", "\"test\"", ",", "\"5678\"", ",", "device_id", "=", "device_entry", ".", "id", ")", "expected_conditions", "=", "[", "{", "\"condition\"", ":", "\"device\"", ",", "\"domain\"", ":", "DOMAIN", ",", "\"type\"", ":", "\"is_off\"", ",", "\"device_id\"", ":", "device_entry", ".", "id", ",", "\"entity_id\"", ":", "f\"{DOMAIN}.test_5678\"", ",", "}", ",", "{", "\"condition\"", ":", "\"device\"", ",", "\"domain\"", ":", "DOMAIN", ",", "\"type\"", ":", "\"is_on\"", ",", "\"device_id\"", ":", "device_entry", ".", "id", ",", "\"entity_id\"", ":", "f\"{DOMAIN}.test_5678\"", ",", "}", ",", "]", "conditions", "=", "await", "async_get_device_automations", "(", "hass", ",", "\"condition\"", ",", "device_entry", ".", "id", ")", "assert", "conditions", "==", "expected_conditions" ]
[ 41, 0 ]
[ 67, 44 ]
python
en
['en', 'en', 'en']
True
test_get_condition_capabilities
(hass, device_reg, entity_reg)
Test we get the expected capabilities from a remote condition.
Test we get the expected capabilities from a remote condition.
async def test_get_condition_capabilities(hass, device_reg, entity_reg): """Test we get the expected capabilities from a remote condition.""" config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id) expected_capabilities = { "extra_fields": [ {"name": "for", "optional": True, "type": "positive_time_period_dict"} ] } conditions = await async_get_device_automations(hass, "condition", device_entry.id) for condition in conditions: capabilities = await async_get_device_automation_capabilities( hass, "condition", condition ) assert capabilities == expected_capabilities
[ "async", "def", "test_get_condition_capabilities", "(", "hass", ",", "device_reg", ",", "entity_reg", ")", ":", "config_entry", "=", "MockConfigEntry", "(", "domain", "=", "\"test\"", ",", "data", "=", "{", "}", ")", "config_entry", ".", "add_to_hass", "(", "hass", ")", "device_entry", "=", "device_reg", ".", "async_get_or_create", "(", "config_entry_id", "=", "config_entry", ".", "entry_id", ",", "connections", "=", "{", "(", "device_registry", ".", "CONNECTION_NETWORK_MAC", ",", "\"12:34:56:AB:CD:EF\"", ")", "}", ",", ")", "entity_reg", ".", "async_get_or_create", "(", "DOMAIN", ",", "\"test\"", ",", "\"5678\"", ",", "device_id", "=", "device_entry", ".", "id", ")", "expected_capabilities", "=", "{", "\"extra_fields\"", ":", "[", "{", "\"name\"", ":", "\"for\"", ",", "\"optional\"", ":", "True", ",", "\"type\"", ":", "\"positive_time_period_dict\"", "}", "]", "}", "conditions", "=", "await", "async_get_device_automations", "(", "hass", ",", "\"condition\"", ",", "device_entry", ".", "id", ")", "for", "condition", "in", "conditions", ":", "capabilities", "=", "await", "async_get_device_automation_capabilities", "(", "hass", ",", "\"condition\"", ",", "condition", ")", "assert", "capabilities", "==", "expected_capabilities" ]
[ 70, 0 ]
[ 89, 52 ]
python
en
['en', 'en', 'en']
True
test_if_state
(hass, calls)
Test for turn_on and turn_off conditions.
Test for turn_on and turn_off conditions.
async def test_if_state(hass, calls): """Test for turn_on and turn_off conditions.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() ent1, ent2, ent3 = platform.ENTITIES assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": [ { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent1.entity_id, "type": "is_on", } ], "action": { "service": "test.automation", "data_template": { "some": "is_on {{ trigger.%s }}" % "}} - {{ trigger.".join(("platform", "event.event_type")) }, }, }, { "trigger": {"platform": "event", "event_type": "test_event2"}, "condition": [ { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent1.entity_id, "type": "is_off", } ], "action": { "service": "test.automation", "data_template": { "some": "is_off {{ trigger.%s }}" % "}} - {{ trigger.".join(("platform", "event.event_type")) }, }, }, ] }, ) await hass.async_block_till_done() assert hass.states.get(ent1.entity_id).state == STATE_ON assert len(calls) == 0 hass.bus.async_fire("test_event1") hass.bus.async_fire("test_event2") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "is_on event - test_event1" hass.states.async_set(ent1.entity_id, STATE_OFF) hass.bus.async_fire("test_event1") hass.bus.async_fire("test_event2") await hass.async_block_till_done() assert len(calls) == 2 assert calls[1].data["some"] == "is_off event - test_event2"
[ "async", "def", "test_if_state", "(", "hass", ",", "calls", ")", ":", "platform", "=", "getattr", "(", "hass", ".", "components", ",", "f\"test.{DOMAIN}\"", ")", "platform", ".", "init", "(", ")", "assert", "await", "async_setup_component", "(", "hass", ",", "DOMAIN", ",", "{", "DOMAIN", ":", "{", "CONF_PLATFORM", ":", "\"test\"", "}", "}", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "ent1", ",", "ent2", ",", "ent3", "=", "platform", ".", "ENTITIES", "assert", "await", "async_setup_component", "(", "hass", ",", "automation", ".", "DOMAIN", ",", "{", "automation", ".", "DOMAIN", ":", "[", "{", "\"trigger\"", ":", "{", "\"platform\"", ":", "\"event\"", ",", "\"event_type\"", ":", "\"test_event1\"", "}", ",", "\"condition\"", ":", "[", "{", "\"condition\"", ":", "\"device\"", ",", "\"domain\"", ":", "DOMAIN", ",", "\"device_id\"", ":", "\"\"", ",", "\"entity_id\"", ":", "ent1", ".", "entity_id", ",", "\"type\"", ":", "\"is_on\"", ",", "}", "]", ",", "\"action\"", ":", "{", "\"service\"", ":", "\"test.automation\"", ",", "\"data_template\"", ":", "{", "\"some\"", ":", "\"is_on {{ trigger.%s }}\"", "%", "\"}} - {{ trigger.\"", ".", "join", "(", "(", "\"platform\"", ",", "\"event.event_type\"", ")", ")", "}", ",", "}", ",", "}", ",", "{", "\"trigger\"", ":", "{", "\"platform\"", ":", "\"event\"", ",", "\"event_type\"", ":", "\"test_event2\"", "}", ",", "\"condition\"", ":", "[", "{", "\"condition\"", ":", "\"device\"", ",", "\"domain\"", ":", "DOMAIN", ",", "\"device_id\"", ":", "\"\"", ",", "\"entity_id\"", ":", "ent1", ".", "entity_id", ",", "\"type\"", ":", "\"is_off\"", ",", "}", "]", ",", "\"action\"", ":", "{", "\"service\"", ":", "\"test.automation\"", ",", "\"data_template\"", ":", "{", "\"some\"", ":", "\"is_off {{ trigger.%s }}\"", "%", "\"}} - {{ trigger.\"", ".", "join", "(", "(", "\"platform\"", ",", "\"event.event_type\"", ")", ")", "}", ",", "}", ",", "}", ",", "]", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "hass", ".", "states", ".", "get", "(", "ent1", ".", "entity_id", ")", ".", "state", "==", "STATE_ON", "assert", "len", "(", "calls", ")", "==", "0", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event1\"", ")", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event2\"", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "calls", ")", "==", "1", "assert", "calls", "[", "0", "]", ".", "data", "[", "\"some\"", "]", "==", "\"is_on event - test_event1\"", "hass", ".", "states", ".", "async_set", "(", "ent1", ".", "entity_id", ",", "STATE_OFF", ")", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event1\"", ")", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event2\"", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "calls", ")", "==", "2", "assert", "calls", "[", "1", "]", ".", "data", "[", "\"some\"", "]", "==", "\"is_off event - test_event2\"" ]
[ 92, 0 ]
[ 163, 64 ]
python
en
['en', 'en', 'en']
True
test_if_fires_on_for_condition
(hass, calls)
Test for firing if condition is on with delay.
Test for firing if condition is on with delay.
async def test_if_fires_on_for_condition(hass, calls): """Test for firing if condition is on with delay.""" point1 = dt_util.utcnow() point2 = point1 + timedelta(seconds=10) point3 = point2 + timedelta(seconds=10) platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() ent1, ent2, ent3 = platform.ENTITIES with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow: mock_utcnow.return_value = point1 assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent1.entity_id, "type": "is_off", "for": {"seconds": 5}, }, "action": { "service": "test.automation", "data_template": { "some": "is_off {{ trigger.%s }}" % "}} - {{ trigger.".join( ("platform", "event.event_type") ) }, }, } ] }, ) await hass.async_block_till_done() assert hass.states.get(ent1.entity_id).state == STATE_ON assert len(calls) == 0 hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 0 # Time travel 10 secs into the future mock_utcnow.return_value = point2 hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 0 hass.states.async_set(ent1.entity_id, STATE_OFF) hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 0 # Time travel 20 secs into the future mock_utcnow.return_value = point3 hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "is_off event - test_event1"
[ "async", "def", "test_if_fires_on_for_condition", "(", "hass", ",", "calls", ")", ":", "point1", "=", "dt_util", ".", "utcnow", "(", ")", "point2", "=", "point1", "+", "timedelta", "(", "seconds", "=", "10", ")", "point3", "=", "point2", "+", "timedelta", "(", "seconds", "=", "10", ")", "platform", "=", "getattr", "(", "hass", ".", "components", ",", "f\"test.{DOMAIN}\"", ")", "platform", ".", "init", "(", ")", "assert", "await", "async_setup_component", "(", "hass", ",", "DOMAIN", ",", "{", "DOMAIN", ":", "{", "CONF_PLATFORM", ":", "\"test\"", "}", "}", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "ent1", ",", "ent2", ",", "ent3", "=", "platform", ".", "ENTITIES", "with", "patch", "(", "\"homeassistant.core.dt_util.utcnow\"", ")", "as", "mock_utcnow", ":", "mock_utcnow", ".", "return_value", "=", "point1", "assert", "await", "async_setup_component", "(", "hass", ",", "automation", ".", "DOMAIN", ",", "{", "automation", ".", "DOMAIN", ":", "[", "{", "\"trigger\"", ":", "{", "\"platform\"", ":", "\"event\"", ",", "\"event_type\"", ":", "\"test_event1\"", "}", ",", "\"condition\"", ":", "{", "\"condition\"", ":", "\"device\"", ",", "\"domain\"", ":", "DOMAIN", ",", "\"device_id\"", ":", "\"\"", ",", "\"entity_id\"", ":", "ent1", ".", "entity_id", ",", "\"type\"", ":", "\"is_off\"", ",", "\"for\"", ":", "{", "\"seconds\"", ":", "5", "}", ",", "}", ",", "\"action\"", ":", "{", "\"service\"", ":", "\"test.automation\"", ",", "\"data_template\"", ":", "{", "\"some\"", ":", "\"is_off {{ trigger.%s }}\"", "%", "\"}} - {{ trigger.\"", ".", "join", "(", "(", "\"platform\"", ",", "\"event.event_type\"", ")", ")", "}", ",", "}", ",", "}", "]", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "hass", ".", "states", ".", "get", "(", "ent1", ".", "entity_id", ")", ".", "state", "==", "STATE_ON", "assert", "len", "(", "calls", ")", "==", "0", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event1\"", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "calls", ")", "==", "0", "# Time travel 10 secs into the future", "mock_utcnow", ".", "return_value", "=", "point2", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event1\"", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "calls", ")", "==", "0", "hass", ".", "states", ".", "async_set", "(", "ent1", ".", "entity_id", ",", "STATE_OFF", ")", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event1\"", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "calls", ")", "==", "0", "# Time travel 20 secs into the future", "mock_utcnow", ".", "return_value", "=", "point3", "hass", ".", "bus", ".", "async_fire", "(", "\"test_event1\"", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "len", "(", "calls", ")", "==", "1", "assert", "calls", "[", "0", "]", ".", "data", "[", "\"some\"", "]", "==", "\"is_off event - test_event1\"" ]
[ 166, 0 ]
[ 234, 68 ]
python
en
['en', 'en', 'en']
True
test_config_entry_not_ready
( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker )
Test the Elgato Key Light configuration entry not ready.
Test the Elgato Key Light configuration entry not ready.
async def test_config_entry_not_ready( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test the Elgato Key Light configuration entry not ready.""" aioclient_mock.get( "http://1.2.3.4:9123/elgato/accessory-info", exc=aiohttp.ClientError ) entry = await init_integration(hass, aioclient_mock) assert entry.state == ENTRY_STATE_SETUP_RETRY
[ "async", "def", "test_config_entry_not_ready", "(", "hass", ":", "HomeAssistant", ",", "aioclient_mock", ":", "AiohttpClientMocker", ")", "->", "None", ":", "aioclient_mock", ".", "get", "(", "\"http://1.2.3.4:9123/elgato/accessory-info\"", ",", "exc", "=", "aiohttp", ".", "ClientError", ")", "entry", "=", "await", "init_integration", "(", "hass", ",", "aioclient_mock", ")", "assert", "entry", ".", "state", "==", "ENTRY_STATE_SETUP_RETRY" ]
[ 11, 0 ]
[ 20, 49 ]
python
en
['en', 'en', 'en']
True
test_unload_config_entry
( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker )
Test the Elgato Key Light configuration entry unloading.
Test the Elgato Key Light configuration entry unloading.
async def test_unload_config_entry( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test the Elgato Key Light configuration entry unloading.""" entry = await init_integration(hass, aioclient_mock) assert hass.data[DOMAIN] await hass.config_entries.async_unload(entry.entry_id) await hass.async_block_till_done() assert not hass.data.get(DOMAIN)
[ "async", "def", "test_unload_config_entry", "(", "hass", ":", "HomeAssistant", ",", "aioclient_mock", ":", "AiohttpClientMocker", ")", "->", "None", ":", "entry", "=", "await", "init_integration", "(", "hass", ",", "aioclient_mock", ")", "assert", "hass", ".", "data", "[", "DOMAIN", "]", "await", "hass", ".", "config_entries", ".", "async_unload", "(", "entry", ".", "entry_id", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "not", "hass", ".", "data", ".", "get", "(", "DOMAIN", ")" ]
[ 23, 0 ]
[ 32, 36 ]
python
en
['en', 'zu', 'en']
True
setup_platform
(hass, config, add_entities, discovery_info=None)
Set up Hikvision camera.
Set up Hikvision camera.
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up Hikvision camera.""" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) try: hikvision_cam = hikvision.api.CreateDevice( host, port=port, username=username, password=password, is_https=False ) except MissingParamError as param_err: _LOGGING.error("Missing required param: %s", param_err) return False except HikvisionError as conn_err: _LOGGING.error("Unable to connect: %s", conn_err) return False add_entities([HikvisionMotionSwitch(name, hikvision_cam)])
[ "def", "setup_platform", "(", "hass", ",", "config", ",", "add_entities", ",", "discovery_info", "=", "None", ")", ":", "host", "=", "config", ".", "get", "(", "CONF_HOST", ")", "port", "=", "config", ".", "get", "(", "CONF_PORT", ")", "name", "=", "config", ".", "get", "(", "CONF_NAME", ")", "username", "=", "config", ".", "get", "(", "CONF_USERNAME", ")", "password", "=", "config", ".", "get", "(", "CONF_PASSWORD", ")", "try", ":", "hikvision_cam", "=", "hikvision", ".", "api", ".", "CreateDevice", "(", "host", ",", "port", "=", "port", ",", "username", "=", "username", ",", "password", "=", "password", ",", "is_https", "=", "False", ")", "except", "MissingParamError", "as", "param_err", ":", "_LOGGING", ".", "error", "(", "\"Missing required param: %s\"", ",", "param_err", ")", "return", "False", "except", "HikvisionError", "as", "conn_err", ":", "_LOGGING", ".", "error", "(", "\"Unable to connect: %s\"", ",", "conn_err", ")", "return", "False", "add_entities", "(", "[", "HikvisionMotionSwitch", "(", "name", ",", "hikvision_cam", ")", "]", ")" ]
[ 39, 0 ]
[ 58, 62 ]
python
en
['en', 'da', 'en']
True
HikvisionMotionSwitch.__init__
(self, name, hikvision_cam)
Initialize the switch.
Initialize the switch.
def __init__(self, name, hikvision_cam): """Initialize the switch.""" self._name = name self._hikvision_cam = hikvision_cam self._state = STATE_OFF
[ "def", "__init__", "(", "self", ",", "name", ",", "hikvision_cam", ")", ":", "self", ".", "_name", "=", "name", "self", ".", "_hikvision_cam", "=", "hikvision_cam", "self", ".", "_state", "=", "STATE_OFF" ]
[ 64, 4 ]
[ 68, 31 ]
python
en
['en', 'en', 'en']
True
HikvisionMotionSwitch.name
(self)
Return the name of the device if any.
Return the name of the device if any.
def name(self): """Return the name of the device if any.""" return self._name
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "_name" ]
[ 71, 4 ]
[ 73, 25 ]
python
en
['en', 'en', 'en']
True
HikvisionMotionSwitch.state
(self)
Return the state of the device if any.
Return the state of the device if any.
def state(self): """Return the state of the device if any.""" return self._state
[ "def", "state", "(", "self", ")", ":", "return", "self", ".", "_state" ]
[ 76, 4 ]
[ 78, 26 ]
python
en
['en', 'en', 'en']
True
HikvisionMotionSwitch.is_on
(self)
Return true if device is on.
Return true if device is on.
def is_on(self): """Return true if device is on.""" return self._state == STATE_ON
[ "def", "is_on", "(", "self", ")", ":", "return", "self", ".", "_state", "==", "STATE_ON" ]
[ 81, 4 ]
[ 83, 38 ]
python
en
['en', 'fy', 'en']
True
HikvisionMotionSwitch.turn_on
(self, **kwargs)
Turn the device on.
Turn the device on.
def turn_on(self, **kwargs): """Turn the device on.""" _LOGGING.info("Turning on Motion Detection ") self._hikvision_cam.enable_motion_detection()
[ "def", "turn_on", "(", "self", ",", "*", "*", "kwargs", ")", ":", "_LOGGING", ".", "info", "(", "\"Turning on Motion Detection \"", ")", "self", ".", "_hikvision_cam", ".", "enable_motion_detection", "(", ")" ]
[ 85, 4 ]
[ 88, 53 ]
python
en
['en', 'en', 'en']
True
HikvisionMotionSwitch.turn_off
(self, **kwargs)
Turn the device off.
Turn the device off.
def turn_off(self, **kwargs): """Turn the device off.""" _LOGGING.info("Turning off Motion Detection ") self._hikvision_cam.disable_motion_detection()
[ "def", "turn_off", "(", "self", ",", "*", "*", "kwargs", ")", ":", "_LOGGING", ".", "info", "(", "\"Turning off Motion Detection \"", ")", "self", ".", "_hikvision_cam", ".", "disable_motion_detection", "(", ")" ]
[ 90, 4 ]
[ 93, 54 ]
python
en
['en', 'en', 'en']
True
HikvisionMotionSwitch.update
(self)
Update Motion Detection state.
Update Motion Detection state.
def update(self): """Update Motion Detection state.""" enabled = self._hikvision_cam.is_motion_detection_enabled() _LOGGING.info("enabled: %s", enabled) self._state = STATE_ON if enabled else STATE_OFF
[ "def", "update", "(", "self", ")", ":", "enabled", "=", "self", ".", "_hikvision_cam", ".", "is_motion_detection_enabled", "(", ")", "_LOGGING", ".", "info", "(", "\"enabled: %s\"", ",", "enabled", ")", "self", ".", "_state", "=", "STATE_ON", "if", "enabled", "else", "STATE_OFF" ]
[ 95, 4 ]
[ 100, 56 ]
python
en
['da', 'en', 'en']
True
async_activate_log_queue_handler
(hass: HomeAssistant)
Migrate the existing log handlers to use the queue. This allows us to avoid blocking I/O and formatting messages in the event loop as log messages are written in another thread.
Migrate the existing log handlers to use the queue.
def async_activate_log_queue_handler(hass: HomeAssistant) -> None: """ Migrate the existing log handlers to use the queue. This allows us to avoid blocking I/O and formatting messages in the event loop as log messages are written in another thread. """ simple_queue = queue.SimpleQueue() # type: ignore queue_handler = HomeAssistantQueueHandler(simple_queue) logging.root.addHandler(queue_handler) migrated_handlers = [] for handler in logging.root.handlers[:]: if handler is queue_handler: continue logging.root.removeHandler(handler) migrated_handlers.append(handler) listener = logging.handlers.QueueListener(simple_queue, *migrated_handlers) listener.start() @callback def _async_stop_queue_handler(_: Any) -> None: """Cleanup handler.""" logging.root.removeHandler(queue_handler) listener.stop() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_stop_queue_handler)
[ "def", "async_activate_log_queue_handler", "(", "hass", ":", "HomeAssistant", ")", "->", "None", ":", "simple_queue", "=", "queue", ".", "SimpleQueue", "(", ")", "# type: ignore", "queue_handler", "=", "HomeAssistantQueueHandler", "(", "simple_queue", ")", "logging", ".", "root", ".", "addHandler", "(", "queue_handler", ")", "migrated_handlers", "=", "[", "]", "for", "handler", "in", "logging", ".", "root", ".", "handlers", "[", ":", "]", ":", "if", "handler", "is", "queue_handler", ":", "continue", "logging", ".", "root", ".", "removeHandler", "(", "handler", ")", "migrated_handlers", ".", "append", "(", "handler", ")", "listener", "=", "logging", ".", "handlers", ".", "QueueListener", "(", "simple_queue", ",", "*", "migrated_handlers", ")", "listener", ".", "start", "(", ")", "@", "callback", "def", "_async_stop_queue_handler", "(", "_", ":", "Any", ")", "->", "None", ":", "\"\"\"Cleanup handler.\"\"\"", "logging", ".", "root", ".", "removeHandler", "(", "queue_handler", ")", "listener", ".", "stop", "(", ")", "hass", ".", "bus", ".", "async_listen_once", "(", "EVENT_HOMEASSISTANT_CLOSE", ",", "_async_stop_queue_handler", ")" ]
[ 61, 0 ]
[ 89, 84 ]
python
en
['en', 'error', 'th']
False