Kaushik066 committed
Commit a4cdf8d · 1 Parent(s): c95030a

Update app.py

Files changed (1): app.py (+21 -17)
app.py CHANGED
@@ -253,16 +253,12 @@ def list_videos():
 def play_video(selected_video):
     return os.path.join(data_path, selected_video) if selected_video else None

-## Function to refresh dropdown options
-#def refresh_dropdown():
-#    return gr.Dropdown.update(choices=list_videos())
-
 # Main Function for tab - Gesture recognition
 def translate_sign_language(gesture):
     # Create Dataset
     prod_ds = dataset_prod_obj.create_dataset(gesture)
-    #prod_video = tensor_to_video(prod_ds)
-    prod_video = np.random.randint(0, 255, (32, 225, 225, 3), dtype=np.uint8)
+    prod_video = tensor_to_video(prod_ds)
+    #prod_video = np.random.randint(0, 255, (32, 225, 225, 3), dtype=np.uint8)

     # Run ML Model
     predicted_prod_label = prod_function(model_pretrained, prod_ds)
@@ -272,23 +268,29 @@ def translate_sign_language(gesture):

     idx_to_label = model_pretrained.config.id2label
     gesture_translation = idx_to_label[predicted_prod_label.cpu().numpy().item()] # Convert to a scalar
-
-    return gesture_translation , prod_video
+
+    # Frame generator for real-time streaming
+    def frame_generator():
+        for frame in prod_video:
+            yield frame # Stream frame-by-frame
+
+    return gesture_translation , frame_generator

 with gr.Blocks() as demo:
     gr.Markdown("# Indian Sign Language Translation App")

     # Gesture recognition Tab
     with gr.Tab("Gesture recognition"):
-        with gr.Row():
-            with gr.Column(scale=2, min_width=300):
+        with gr.Row(height=300, variant="panel", equal_height=True, show_progress=True):
+            with gr.Column(scale=1, variant="panel"):
                 # Add webcam input for sign language video capture
                 video_input = gr.Video(format="mp4", label="Gesture")
                 # Submit the Video
                 video_button = gr.Button("Submit")
-            # Display the landmarked video
-            video_output = gr.Video(streaming=True, label="Landmarked Gesture")
-        with gr.Row():
+            with gr.Column(scale=1, variant="panel"):
+                # Display the landmarked video
+                video_output = gr.Video(streaming=True, label="Landmarked Gesture")
+        with gr.Row(height=300, variant="panel", equal_height=True, show_progress=True):
             # Add a button or functionality to process the video
             test_output = gr.Textbox(label="Translation in English")
             # Set up the interface
@@ -296,10 +298,12 @@ with gr.Blocks() as demo:

     # Indian Sign Language gesture reference tab
     with gr.Tab("Indian Sign Language gesture reference"):
-        with gr.Row():
-            video_dropdown = gr.Dropdown(choices=list_videos(), label="ISL gestures", info="More gestures comming soon!")
-            search_button = gr.Button("Search Gesture")
-            search_output = gr.Video(streaming=True, label="ISL gestures Video")
+        with gr.Row(height=300, variant="panel", equal_height=True, show_progress=True):
+            with gr.Column(scale=1, variant="panel"):
+                video_dropdown = gr.Dropdown(choices=list_videos(), label="ISL gestures", info="More gestures comming soon!")
+                search_button = gr.Button("Search Gesture")
+            with gr.Column(scale=1, variant="panel"):
+                search_output = gr.Video(streaming=False, label="ISL gestures Video")
             # Set up the interface
             search_button.click(play_video, inputs=video_dropdown, outputs=search_output)
 
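The second hunk stops returning prod_video directly and instead defines a nested frame_generator, returning the function object as the second output. For comparison, Gradio also allows the event handler itself to be a generator: each yield pushes a new value to the bound outputs. The sketch below shows that pattern under two assumptions that differ from the app as committed: the frames are displayed with a gr.Image component (which accepts NumPy arrays) rather than gr.Video, and the click wiring in the trailing comment is illustrative, not a line taken from app.py.

def translate_sign_language_yielding(gesture):
    # Sketch of Gradio's generator-handler pattern, reusing names from app.py
    # (dataset_prod_obj, tensor_to_video, prod_function, model_pretrained).
    prod_ds = dataset_prod_obj.create_dataset(gesture)
    prod_video = tensor_to_video(prod_ds)             # iterable of (H, W, 3) uint8 frames
    predicted_prod_label = prod_function(model_pretrained, prod_ds)
    idx_to_label = model_pretrained.config.id2label
    gesture_translation = idx_to_label[predicted_prod_label.cpu().numpy().item()]

    # Each yield updates the two bound outputs (text once, image per frame).
    for frame in prod_video:
        yield gesture_translation, frame

# Illustrative wiring (assumed, not from the diff):
# frame_output = gr.Image(label="Landmarked Gesture")
# video_button.click(translate_sign_language_yielding,
#                    inputs=video_input,
#                    outputs=[test_output, frame_output])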
 
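Finally, the reference tab's dropdown is populated by list_videos(), which is also defined outside this diff, while play_video() simply joins data_path with the selected filename. A minimal sketch of what such a helper could look like, assuming data_path is the module-level folder of gesture clips that play_video already uses:

import os

def list_videos(extension=".mp4"):
    # Hypothetical helper: list the gesture clips under data_path so the
    # "ISL gestures" dropdown can offer them by filename.
    return sorted(f for f in os.listdir(data_path) if f.lower().endswith(extension))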