whitphx (HF Staff) committed
Commit 4362cad · Parent: 05b4295

Update streamlit-webrtc to 0.51.0 and delete sample_utils.turn

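In short, every page drops its rtc_configuration={"iceServers": get_ice_servers()} argument along with the Twilio-backed sample_utils/turn.py helper, and the app now pins streamlit_webrtc~=0.51.0, which is presumably expected to take care of ICE server configuration itself (this diff does not show that behavior directly). A minimal sketch of the simplified call pattern used throughout the commit; the key value below is a placeholder, not one of the app's pages:

from streamlit_webrtc import WebRtcMode, webrtc_streamer

# After this commit no rtc_configuration is passed; ICE/TURN setup is
# presumably left to streamlit-webrtc >= 0.51.0 itself.
webrtc_streamer(
    key="example",  # hypothetical key; each page uses its own
    mode=WebRtcMode.SENDRECV,
    media_stream_constraints={"video": True, "audio": False},
)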
app.py CHANGED
@@ -15,7 +15,6 @@ import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
 from sample_utils.download import download_file
-from sample_utils.turn import get_ice_servers
 
 HERE = Path(__file__).parent
 ROOT = HERE
@@ -93,7 +92,10 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 
     # Run inference
     blob = cv2.dnn.blobFromImage(
-        cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5
+        image=cv2.resize(image, (300, 300)),
+        scalefactor=0.007843,
+        size=(300, 300),
+        mean=(127.5, 127.5, 127.5),
     )
     net.setInput(blob)
     output = net.forward()
@@ -138,7 +140,6 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 webrtc_ctx = webrtc_streamer(
     key="object-detection",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
     video_frame_callback=video_frame_callback,
     media_stream_constraints={"video": True, "audio": False},
     async_processing=True,
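For reference, the keyword names introduced above match the parameters of OpenCV's cv2.dnn.blobFromImage (image, scalefactor, size, mean). A minimal standalone sketch of the same preprocessing, with a dummy frame standing in for the webcam image (not part of this commit):

import cv2
import numpy as np

# Dummy BGR frame standing in for the camera image used by the sample.
image = np.zeros((480, 640, 3), dtype=np.uint8)

# Resize to the 300x300 network input, scale pixels by ~1/127.5,
# and subtract a per-channel mean of 127.5, as in the diff above.
blob = cv2.dnn.blobFromImage(
    image=cv2.resize(image, (300, 300)),
    scalefactor=0.007843,
    size=(300, 300),
    mean=(127.5, 127.5, 127.5),
)
print(blob.shape)  # NCHW blob, e.g. (1, 3, 300, 300)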
pages/.isort.cfg DELETED
@@ -1,3 +0,0 @@
-[settings]
-profile=black
-known_third_party=streamlit_webrtc
pages/10_sendonly_audio.py CHANGED
@@ -11,8 +11,6 @@ import pydub
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 logger = logging.getLogger(__name__)
 
 
@@ -20,7 +18,6 @@ webrtc_ctx = webrtc_streamer(
     key="sendonly-audio",
     mode=WebRtcMode.SENDONLY,
     audio_receiver_size=256,
-    rtc_configuration={"iceServers": get_ice_servers()},
     media_stream_constraints={"audio": True},
 )
 
pages/11_programatic_control_playing.py CHANGED
@@ -3,13 +3,10 @@
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 playing = st.checkbox("Playing", value=True)
 
 webrtc_streamer(
     key="programatic_control",
     desired_playing_state=playing,
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
 )
pages/12_media_constraints_configs.py CHANGED
@@ -3,13 +3,10 @@
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 frame_rate = 5
 webrtc_streamer(
     key="media-constraints",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
     media_stream_constraints={
         "video": {"frameRate": {"ideal": frame_rate}},
     },
pages/13_ui_texts_customization.py CHANGED
@@ -1,10 +1,7 @@
 from streamlit_webrtc import webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 webrtc_streamer(
     key="custom_ui_texts",
-    rtc_configuration={"iceServers": get_ice_servers()},
     translations={
         "start": "開始",
         "stop": "停止",
pages/1_object_detection.py CHANGED
@@ -15,7 +15,6 @@ import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
 from sample_utils.download import download_file
-from sample_utils.turn import get_ice_servers
 
 HERE = Path(__file__).parent
 ROOT = HERE.parent
@@ -93,7 +92,10 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 
     # Run inference
     blob = cv2.dnn.blobFromImage(
-        cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5
+        image=cv2.resize(image, (300, 300)),
+        scalefactor=0.007843,
+        size=(300, 300),
+        mean=(127.5, 127.5, 127.5),
     )
     net.setInput(blob)
     output = net.forward()
@@ -138,7 +140,6 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 webrtc_ctx = webrtc_streamer(
     key="object-detection",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
     video_frame_callback=video_frame_callback,
     media_stream_constraints={"video": True, "audio": False},
     async_processing=True,
pages/2_opencv_filters.py CHANGED
@@ -5,8 +5,6 @@ import cv2
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 _type = st.radio("Select transform type", ("noop", "cartoon", "edges", "rotate"))
 
 
@@ -51,7 +49,6 @@ def callback(frame: av.VideoFrame) -> av.VideoFrame:
 webrtc_streamer(
     key="opencv-filter",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
     video_frame_callback=callback,
     media_stream_constraints={"video": True, "audio": False},
     async_processing=True,
pages/3_audio_filter.py CHANGED
@@ -4,8 +4,6 @@ import pydub
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 gain = st.slider("Gain", -10.0, +20.0, 1.0, 0.05)
 
 
@@ -34,7 +32,6 @@ def process_audio(frame: av.AudioFrame) -> av.AudioFrame:
 webrtc_streamer(
     key="audio-filter",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
     audio_frame_callback=process_audio,
     async_processing=True,
 )
pages/4_delayed_echo.py CHANGED
@@ -6,8 +6,6 @@ import av
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 logger = logging.getLogger(__name__)
 
 
@@ -36,7 +34,6 @@ async def queued_audio_frames_callback(
 webrtc_streamer(
     key="delay",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
     queued_video_frames_callback=queued_video_frames_callback,
     queued_audio_frames_callback=queued_audio_frames_callback,
     async_processing=True,
pages/5_fork_multi_outputs.py CHANGED
@@ -10,8 +10,6 @@ import cv2
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 st.markdown(
     """
     Fork one input to multiple outputs with different video filters.
@@ -62,13 +60,10 @@ def make_video_frame_callback(_type: VideoFilterType):
     return callback
 
 
-COMMON_RTC_CONFIG = {"iceServers": get_ice_servers()}
-
 st.header("Input")
 ctx = webrtc_streamer(
     key="loopback",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration=COMMON_RTC_CONFIG,
     media_stream_constraints={"video": True, "audio": False},
 )
 
@@ -85,7 +80,6 @@ webrtc_streamer(
     video_frame_callback=callback,
     source_video_track=ctx.output_video_track,
     desired_playing_state=ctx.state.playing,
-    rtc_configuration=COMMON_RTC_CONFIG,
     media_stream_constraints={"video": True, "audio": False},
 )
 
@@ -102,6 +96,5 @@ webrtc_streamer(
     video_frame_callback=callback,
     source_video_track=ctx.output_video_track,
     desired_playing_state=ctx.state.playing,
-    rtc_configuration=COMMON_RTC_CONFIG,
     media_stream_constraints={"video": True, "audio": False},
 )
pages/6_mix_multi_inputs.py CHANGED
@@ -11,14 +11,13 @@ import cv2
 import numpy as np
 import streamlit as st
 from streamlit_webrtc import (
+    MediaStreamMixTrack,
     WebRtcMode,
     create_mix_track,
     create_process_track,
     webrtc_streamer,
 )
 
-from sample_utils.turn import get_ice_servers
-
 st.markdown(
     """
     Mix multiple inputs with different video filters into one stream.
@@ -114,13 +113,10 @@ def mixer_callback(frames: List[av.VideoFrame]) -> av.VideoFrame:
     return new_frame
 
 
-COMMON_RTC_CONFIG = {"iceServers": get_ice_servers()}
-
 st.header("Input 1")
 input1_ctx = webrtc_streamer(
     key="input1_ctx",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration=COMMON_RTC_CONFIG,
     media_stream_constraints={"video": True, "audio": False},
 )
 filter1_type = st.radio(
@@ -140,7 +136,6 @@ st.header("Input 2")
 input2_ctx = webrtc_streamer(
     key="input2_ctx",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration=COMMON_RTC_CONFIG,
     media_stream_constraints={"video": True, "audio": False},
 )
 filter2_type = st.radio(
@@ -159,7 +154,6 @@ st.header("Input 3 (no filter)")
 input3_ctx = webrtc_streamer(
     key="input3_ctx",
     mode=WebRtcMode.SENDRECV,
-    rtc_configuration=COMMON_RTC_CONFIG,
     media_stream_constraints={"video": True, "audio": False},
 )
 
@@ -168,7 +162,6 @@ mix_track = create_mix_track(kind="video", mixer_callback=mixer_callback, key="m
 mix_ctx = webrtc_streamer(
     key="mix",
     mode=WebRtcMode.RECVONLY,
-    rtc_configuration=COMMON_RTC_CONFIG,
     source_video_track=mix_track,
     desired_playing_state=input1_ctx.state.playing
     or input2_ctx.state.playing
@@ -176,9 +169,15 @@ mix_ctx = webrtc_streamer(
 )
 
 if mix_ctx.source_video_track and input1_video_process_track:
-    mix_ctx.source_video_track.add_input_track(input1_video_process_track)
+    cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track(
+        input1_video_process_track
+    )
 if mix_ctx.source_video_track and input2_video_process_track:
-    mix_ctx.source_video_track.add_input_track(input2_video_process_track)
+    cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track(
+        input2_video_process_track
+    )
 if mix_ctx.source_video_track and input3_ctx.output_video_track:
     # Input3 is sourced without any filter.
-    mix_ctx.source_video_track.add_input_track(input3_ctx.output_video_track)
+    cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track(
+        input3_ctx.output_video_track
+    )
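The new add_input_track calls wrap the track in typing.cast so static type checkers treat mix_ctx.source_video_track as a MediaStreamMixTrack; cast changes nothing at runtime. A minimal sketch of the pattern, assuming cast is imported from typing in the page (the helper name connect_input is hypothetical):

from typing import cast

from streamlit_webrtc import MediaStreamMixTrack


def connect_input(mix_ctx, input_track) -> None:
    # cast() is a no-op at runtime; it only tells the type checker which
    # concrete track type add_input_track() is defined on.
    if mix_ctx.source_video_track and input_track:
        cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track(input_track)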
pages/7_record.py CHANGED
@@ -7,8 +7,6 @@ import streamlit as st
 from aiortc.contrib.media import MediaRecorder
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 
 def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
     img = frame.to_ndarray(format="bgr24")
@@ -41,7 +39,6 @@ def app():
     webrtc_streamer(
         key="record",
         mode=WebRtcMode.SENDRECV,
-        rtc_configuration={"iceServers": get_ice_servers()},
         media_stream_constraints={
             "video": True,
             "audio": True,
pages/8_media_files_streaming.py CHANGED
@@ -11,7 +11,6 @@ from aiortc.contrib.media import MediaPlayer
 from streamlit_webrtc import WebRtcMode, WebRtcStreamerContext, webrtc_streamer
 
 from sample_utils.download import download_file
-from sample_utils.turn import get_ice_servers
 
 HERE = Path(__file__).parent
 ROOT = HERE.parent
@@ -114,7 +113,6 @@ def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
 webrtc_streamer(
     key=key,
     mode=WebRtcMode.RECVONLY,
-    rtc_configuration={"iceServers": get_ice_servers()},
     media_stream_constraints={
         "video": media_file_info["type"] == "video",
         "audio": media_file_info["type"] == "audio",
pages/9_sendonly_video.py CHANGED
@@ -7,15 +7,12 @@ import queue
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 
-from sample_utils.turn import get_ice_servers
-
 logger = logging.getLogger(__name__)
 
 
 webrtc_ctx = webrtc_streamer(
     key="video-sendonly",
     mode=WebRtcMode.SENDONLY,
-    rtc_configuration={"iceServers": get_ice_servers()},
     media_stream_constraints={"video": True},
 )
 
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 opencv-python-headless
 pydub==0.25.1
-streamlit_webrtc~=0.48.1
+streamlit_webrtc~=0.51.0
 twilio~=9.4.3
 matplotlib
sample_utils/turn.py DELETED
@@ -1,39 +0,0 @@
-import logging
-import os
-
-import streamlit as st
-from twilio.base.exceptions import TwilioRestException
-from twilio.rest import Client
-
-logger = logging.getLogger(__name__)
-
-
-def get_ice_servers():
-    """Use Twilio's TURN server because Streamlit Community Cloud has changed
-    its infrastructure and WebRTC connection cannot be established without TURN server now.  # noqa: E501
-    We considered Open Relay Project (https://www.metered.ca/tools/openrelay/) too,
-    but it is not stable and hardly works as some people reported like https://github.com/aiortc/aiortc/issues/832#issuecomment-1482420656  # noqa: E501
-    See https://github.com/whitphx/streamlit-webrtc/issues/1213
-    """
-
-    # Ref: https://www.twilio.com/docs/stun-turn/api
-    try:
-        account_sid = os.environ["TWILIO_ACCOUNT_SID"]
-        auth_token = os.environ["TWILIO_AUTH_TOKEN"]
-    except KeyError:
-        logger.warning(
-            "Twilio credentials are not set. Fallback to a free STUN server from Google."  # noqa: E501
-        )
-        return [{"urls": ["stun:stun.l.google.com:19302"]}]
-
-    client = Client(account_sid, auth_token)
-
-    try:
-        token = client.tokens.create()
-    except TwilioRestException as e:
-        st.warning(
-            f"Error occurred while accessing Twilio API. Fallback to a free STUN server from Google. ({e})"  # noqa: E501
-        )
-        return [{"urls": ["stun:stun.l.google.com:19302"]}]
-
-    return token.ice_servers