Pratyush101 committed on
Commit 4a24b2e · verified · Parent: 8ecc89a

Delete pages/6_mix_multi_inputs.py

Files changed (1)
  1. pages/6_mix_multi_inputs.py +0 -184
pages/6_mix_multi_inputs.py DELETED
@@ -1,184 +0,0 @@
- import math
- from typing import List, cast
-
- try:
-     from typing import Literal
- except ImportError:
-     from typing_extensions import Literal  # type: ignore
-
- import av
- import cv2
- import numpy as np
- import streamlit as st
- from streamlit_webrtc import (
-     WebRtcMode,
-     create_mix_track,
-     create_process_track,
-     webrtc_streamer,
- )
-
- from sample_utils.turn import get_ice_servers
-
- st.markdown(
-     """
- Mix multiple inputs with different video filters into one stream.
- """
- )
-
- VideoFilterType = Literal["noop", "cartoon", "edges", "rotate"]
-
-
- def make_video_frame_callback(_type: VideoFilterType):
-     def callback(frame: av.VideoFrame) -> av.VideoFrame:
-         img = frame.to_ndarray(format="bgr24")
-
-         if _type == "noop":
-             pass
-         elif _type == "cartoon":
-             # prepare color
-             img_color = cv2.pyrDown(cv2.pyrDown(img))
-             for _ in range(6):
-                 img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
-             img_color = cv2.pyrUp(cv2.pyrUp(img_color))
-
-             # prepare edges
-             img_edges = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-             img_edges = cv2.adaptiveThreshold(
-                 cv2.medianBlur(img_edges, 7),
-                 255,
-                 cv2.ADAPTIVE_THRESH_MEAN_C,
-                 cv2.THRESH_BINARY,
-                 9,
-                 2,
-             )
-             img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)
-
-             # combine color and edges
-             img = cv2.bitwise_and(img_color, img_edges)
-         elif _type == "edges":
-             # perform edge detection
-             img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)
-         elif _type == "rotate":
-             # rotate image
-             rows, cols, _ = img.shape
-             M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
-             img = cv2.warpAffine(img, M, (cols, rows))
-
-         return av.VideoFrame.from_ndarray(img, format="bgr24")
-
-     return callback
-
-
- def mixer_callback(frames: List[av.VideoFrame]) -> av.VideoFrame:
-     buf_w = 640
-     buf_h = 480
-     buffer = np.zeros((buf_h, buf_w, 3), dtype=np.uint8)
-
-     n_inputs = len(frames)
-
-     n_cols = math.ceil(math.sqrt(n_inputs))
-     n_rows = math.ceil(n_inputs / n_cols)
-     grid_w = buf_w // n_cols
-     grid_h = buf_h // n_rows
-
-     for i in range(n_inputs):
-         frame = frames[i]
-         if frame is None:
-             continue
-
-         grid_x = (i % n_cols) * grid_w
-         grid_y = (i // n_cols) * grid_h
-
-         img = frame.to_ndarray(format="bgr24")
-         src_h, src_w = img.shape[0:2]
-
-         aspect_ratio = src_w / src_h
-
-         window_w = min(grid_w, int(grid_h * aspect_ratio))
-         window_h = min(grid_h, int(window_w / aspect_ratio))
-
-         window_offset_x = (grid_w - window_w) // 2
-         window_offset_y = (grid_h - window_h) // 2
-
-         window_x0 = grid_x + window_offset_x
-         window_y0 = grid_y + window_offset_y
-         window_x1 = window_x0 + window_w
-         window_y1 = window_y0 + window_h
-
-         buffer[window_y0:window_y1, window_x0:window_x1, :] = cv2.resize(
-             img, (window_w, window_h)
-         )
-
-     new_frame = av.VideoFrame.from_ndarray(buffer, format="bgr24")
-
-     return new_frame
-
-
- COMMON_RTC_CONFIG = {"iceServers": get_ice_servers()}
-
- st.header("Input 1")
- input1_ctx = webrtc_streamer(
-     key="input1_ctx",
-     mode=WebRtcMode.SENDRECV,
-     rtc_configuration=COMMON_RTC_CONFIG,
-     media_stream_constraints={"video": True, "audio": False},
- )
- filter1_type = st.radio(
-     "Select transform type",
-     ("noop", "cartoon", "edges", "rotate"),
-     key="mix-filter1-type",
- )
- callback = make_video_frame_callback(cast(VideoFilterType, filter1_type))
- input1_video_process_track = None
- if input1_ctx.output_video_track:
-     input1_video_process_track = create_process_track(
-         input_track=input1_ctx.output_video_track,
-         frame_callback=callback,
-     )
-
- st.header("Input 2")
- input2_ctx = webrtc_streamer(
-     key="input2_ctx",
-     mode=WebRtcMode.SENDRECV,
-     rtc_configuration=COMMON_RTC_CONFIG,
-     media_stream_constraints={"video": True, "audio": False},
- )
- filter2_type = st.radio(
-     "Select transform type",
-     ("noop", "cartoon", "edges", "rotate"),
-     key="mix-filter2-type",
- )
- callback = make_video_frame_callback(cast(VideoFilterType, filter2_type))
- input2_video_process_track = None
- if input2_ctx.output_video_track:
-     input2_video_process_track = create_process_track(
-         input_track=input2_ctx.output_video_track, frame_callback=callback
-     )
-
- st.header("Input 3 (no filter)")
- input3_ctx = webrtc_streamer(
-     key="input3_ctx",
-     mode=WebRtcMode.SENDRECV,
-     rtc_configuration=COMMON_RTC_CONFIG,
-     media_stream_constraints={"video": True, "audio": False},
- )
-
- st.header("Mixed output")
- mix_track = create_mix_track(kind="video", mixer_callback=mixer_callback, key="mix")
- mix_ctx = webrtc_streamer(
-     key="mix",
-     mode=WebRtcMode.RECVONLY,
-     rtc_configuration=COMMON_RTC_CONFIG,
-     source_video_track=mix_track,
-     desired_playing_state=input1_ctx.state.playing
-     or input2_ctx.state.playing
-     or input3_ctx.state.playing,
- )
-
- if mix_ctx.source_video_track and input1_video_process_track:
-     mix_ctx.source_video_track.add_input_track(input1_video_process_track)
- if mix_ctx.source_video_track and input2_video_process_track:
-     mix_ctx.source_video_track.add_input_track(input2_video_process_track)
- if mix_ctx.source_video_track and input3_ctx.output_video_track:
-     # Input3 is sourced without any filter.
-     mix_ctx.source_video_track.add_input_track(input3_ctx.output_video_track)
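
A note on the deleted logic: `mixer_callback` composites however many inputs are live onto one 640x480 buffer, laid out on a near-square grid (`ceil(sqrt(N))` columns) with each frame letterboxed to preserve its aspect ratio. A minimal standalone sketch of just that layout math follows; `grid_layout` and its default arguments are hypothetical names for illustration, and it adds an `n_inputs == 0` guard that the deleted code did not have:

```python
import math

def grid_layout(n_inputs, buf_w=640, buf_h=480, aspect_ratio=4 / 3):
    """Return the (x0, y0, x1, y1) letterboxed window for each input,
    using the same near-square grid as the deleted mixer_callback."""
    if n_inputs == 0:
        return []  # guard: the deleted code would divide by zero here

    n_cols = math.ceil(math.sqrt(n_inputs))  # e.g. 3 inputs -> 2 cols
    n_rows = math.ceil(n_inputs / n_cols)    #                  2 rows
    grid_w, grid_h = buf_w // n_cols, buf_h // n_rows

    windows = []
    for i in range(n_inputs):
        # Cell origin for input i, filling the grid row by row.
        grid_x = (i % n_cols) * grid_w
        grid_y = (i // n_cols) * grid_h
        # Shrink the window to preserve the source aspect ratio ...
        window_w = min(grid_w, int(grid_h * aspect_ratio))
        window_h = min(grid_h, int(window_w / aspect_ratio))
        # ... and center it inside the cell (letterboxing).
        x0 = grid_x + (grid_w - window_w) // 2
        y0 = grid_y + (grid_h - window_h) // 2
        windows.append((x0, y0, x0 + window_w, y0 + window_h))
    return windows

# With the page's three inputs on the 640x480 buffer: a 2x2 grid of
# 320x240 cells, where each 4:3 frame fills its cell exactly.
print(grid_layout(3))
# [(0, 0, 320, 240), (320, 0, 640, 240), (0, 240, 320, 480)]
```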
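The page's wiring follows a fixed streamlit_webrtc pattern: each SENDRECV widget exposes an `output_video_track`, `create_process_track` wraps it with a per-frame filter, and one RECVONLY widget plays the `create_mix_track` that every input is added to. A rough single-input sketch of that pattern, assuming the imports and definitions from the deleted file above (the keys "src" and "mini-mix" are made up):

```python
# One filtered SENDRECV source feeding one RECVONLY mixed output.
ctx = webrtc_streamer(
    key="src",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": get_ice_servers()},
    media_stream_constraints={"video": True, "audio": False},
)

mix_track = create_mix_track(kind="video", mixer_callback=mixer_callback, key="mini-mix")
mix_ctx = webrtc_streamer(
    key="mini-mix",
    mode=WebRtcMode.RECVONLY,
    rtc_configuration={"iceServers": get_ice_servers()},
    source_video_track=mix_track,
    desired_playing_state=ctx.state.playing,
)

# Wrap the source with a per-frame filter and feed it to the mixer.
if mix_ctx.source_video_track and ctx.output_video_track:
    filtered = create_process_track(
        input_track=ctx.output_video_track,
        frame_callback=make_video_frame_callback("edges"),
    )
    mix_ctx.source_video_track.add_input_track(filtered)
```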