Commit 594d6e4
Parent(s): c217093
Update app.py
app.py CHANGED
@@ -257,8 +257,8 @@ def play_video(selected_video):
 def translate_sign_language(gesture):
     # Create Dataset
     prod_ds = dataset_prod_obj.create_dataset(gesture)
-
-    prod_video = np.random.randint(0, 255, (32, 225, 225, 3), dtype=np.uint8)
+    prod_video = tensor_to_video(prod_ds)
+    #prod_video = np.random.randint(0, 255, (32, 225, 225, 3), dtype=np.uint8)
 
     # Run ML Model
     predicted_prod_label = prod_function(model_pretrained, prod_ds)
@@ -269,30 +269,32 @@ def translate_sign_language(gesture):
     idx_to_label = model_pretrained.config.id2label
     gesture_translation = idx_to_label[predicted_prod_label.cpu().numpy().item()] # Convert to a scalar
 
-
-
-
-
+    # Frame generator for real-time streaming
+    def frame_generator():
+        for frame in prod_video:
+            yield frame # Stream frame-by-frame
 
-    return gesture_translation , prod_video
+    return gesture_translation , frame_generator #prod_video
 
 with gr.Blocks() as demo:
     gr.Markdown("# Indian Sign Language Translation App")
 
     # Gesture recognition Tab
     with gr.Tab("Gesture recognition"):
-        with gr.Row(height=300, variant="panel"
+        with gr.Row(height=300, variant="panel"): # equal_height=False, show_progress=True
             with gr.Column(scale=1, variant="panel"):
                 # Add webcam input for sign language video capture
                 video_input = gr.Video(format="mp4", label="Gesture")
             with gr.Column(scale=1, variant="panel"):
                 # Display the landmarked video
-                video_output = gr.Video(streaming=
-        with gr.Row(variant="panel"
-
-
-
-
+                video_output = gr.Video(streaming=True, label="Landmarked Gesture")
+        with gr.Row(variant="panel"): # equal_height=False, show_progress=True
+            with gr.Column(scale=1, variant="panel"):
+                # Submit the Video
+                video_button = gr.Button("Submit")
+            with gr.Column(scale=1, variant="panel"):
+                # Add a button or functionality to process the video
+                text_output = gr.Textbox(label="Translation in English")
         # Set up the interface
         video_button.click(translate_sign_language, inputs=video_input, outputs=[text_output, video_output])
 
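The substantive change is `prod_video = tensor_to_video(prod_ds)`, which replaces the random placeholder array with frames recovered from the dataset tensor. `tensor_to_video` itself is not part of this diff, so the following is only a minimal sketch of what such a helper could look like, assuming the dataset item holds a normalized float video tensor shaped (T, C, H, W); the `pixel_values` key, the mean/std values, and the shapes are all assumptions, not the app's actual implementation:

```python
import numpy as np
import torch

def tensor_to_video(prod_ds, mean=0.5, std=0.5):
    # Pull the frames out of the dataset item; "pixel_values" is a common
    # key for HF video processors, but that is an assumption here.
    frames = prod_ds["pixel_values"] if isinstance(prod_ds, dict) else prod_ds
    if isinstance(frames, torch.Tensor):
        if frames.dim() == 5:                  # (B, T, C, H, W) -> drop batch dim
            frames = frames[0]
        frames = frames * std + mean           # undo Normalize(mean, std)
        frames = (frames.clamp(0, 1) * 255).to(torch.uint8)
        frames = frames.permute(0, 2, 3, 1)    # (T, C, H, W) -> (T, H, W, C)
        return frames.cpu().numpy()
    return np.asarray(frames, dtype=np.uint8)
```

Under those assumptions the result is a (T, H, W, 3) uint8 array, the same layout as the commented-out `np.random.randint` placeholder, so the new `frame_generator` can iterate over it unchanged.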
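One caveat on the streaming pattern: the commit returns the `frame_generator` function itself as an output value, but in Gradio a handler streams when the handler itself is a generator, i.e. when it yields successive output tuples. Below is a self-contained sketch of that pattern, with a hypothetical `translate_and_stream` standing in for the real pipeline and `gr.Image` used for the frame output, since `gr.Image` accepts raw NumPy frames:

```python
import gradio as gr
import numpy as np

def translate_and_stream(gesture):
    # Stand-ins for the real pipeline: a fixed label and random frames.
    gesture_translation = "placeholder translation"
    prod_video = np.random.randint(0, 255, (32, 225, 225, 3), dtype=np.uint8)

    # Yielding from the handler is what makes Gradio stream: each yield
    # pushes an updated (text, frame) pair to the bound outputs.
    for frame in prod_video:
        yield gesture_translation, frame

with gr.Blocks() as demo:
    with gr.Row():
        video_input = gr.Video(format="mp4", label="Gesture")
        frame_output = gr.Image(label="Landmarked Gesture")
    text_output = gr.Textbox(label="Translation in English")
    gr.Button("Submit").click(
        translate_and_stream,
        inputs=video_input,
        outputs=[text_output, frame_output],
    )

if __name__ == "__main__":
    demo.launch()
```

Each yield pushes a new (text, frame) pair to the two bound outputs, which produces the frame-by-frame playback that the `frame_generator` in this commit is aiming for.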