deanna-emery committed on
Commit
dbc6d1e
·
1 Parent(s): aa022a4
Files changed (1) hide show
  1. app.py +10 -19
app.py CHANGED
@@ -2,6 +2,9 @@ import cv2
2
  import numpy as np
3
  import gradio as gr
4
 
 
 
 
5
  import tensorflow as tf, tf_keras
6
  import tensorflow_hub as hub
7
  from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@@ -68,11 +71,11 @@ def translate(video_file):
68
 
69
  translation = tokenizer.batch_decode(tokens, skip_special_tokens=True)
70
 
 
71
  return {"translation":translation}
72
 
73
  # Gradio App config
74
  title = "ASL Translation (MoViNet + T5)"
75
-
76
  examples = [
77
  ["videos/My_second_ASL_professors_name_was_Will_White.mp4",],
78
  ['videos/You_are_my_sunshine.mp4'],
@@ -81,31 +84,19 @@ examples = [
81
  ['videos/all.mp4']
82
  ['videos/white.mp4']
83
  ]
84
-
85
  # examples = [
86
- # ["videos/My_second_ASL_professors_name_was_Will_White.mp4", "My second ASL professor's name was Will White"],
87
- # ['videos/You_are_my_sunshine.mp4', 'You are my sunshine'],
88
- # ['videos/scrub_your_hands_for_at_least_20_seconds.mp4', 'scrub your hands for at least 20 seconds'],
 
89
  # ['videos/no.mp4', 'no'],
90
- # ['videos/all.mp4', 'all']
91
- # ['videos/white.mp4', 'white']
92
  # ]
93
 
94
- description = "Gradio demo of word-level sign language classification using I3D model pretrained on the WLASL video dataset. " \
95
- "WLASL is a large-scale dataset containing more than 2000 words in American Sign Language. " \
96
- "Examples used in the demo are videos from the the test subset. " \
97
- "Note that WLASL100 contains 100 words while WLASL2000 contains 2000."
98
-
99
-
100
- article = "More information about the trained models can be found <a href=https://github.com/deanna-emery/ASL-Translator/>here</a>."
101
-
102
-
103
  # Gradio App interface
104
  gr.Interface(fn=translate,
105
  inputs="video",
106
  outputs="text",
107
  allow_flagging="never",
108
  title=title,
109
- description=description,
110
- examples=examples,
111
- article=article).launch()
 
2
  import numpy as np
3
  import gradio as gr
4
 
5
+ # import os
6
+ # os.chdir('modeling')
7
+
8
  import tensorflow as tf, tf_keras
9
  import tensorflow_hub as hub
10
  from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
 
71
 
72
  translation = tokenizer.batch_decode(tokens, skip_special_tokens=True)
73
 
74
+ # Return dict {label:pred}
75
  return {"translation":translation}
76
 
77
  # Gradio App config
78
  title = "ASL Translation (MoViNet + T5)"
 
79
  examples = [
80
  ["videos/My_second_ASL_professors_name_was_Will_White.mp4",],
81
  ['videos/You_are_my_sunshine.mp4'],
 
84
  ['videos/all.mp4']
85
  ['videos/white.mp4']
86
  ]
 
87
  # examples = [
88
+ # ['videos/all.mp4', 'all'],
89
+ # ['videos/white.mp4', 'white'],
90
+ # ['videos/before.mp4', 'before'],
91
+ # ['videos/blue.mp4', 'blue'],
92
  # ['videos/no.mp4', 'no'],
93
+ # ['videos/accident2.mp4', 'accident']
 
94
  # ]
95
 
 
 
 
 
 
 
 
 
 
96
  # Gradio App interface
97
  gr.Interface(fn=translate,
98
  inputs="video",
99
  outputs="text",
100
  allow_flagging="never",
101
  title=title,
102
+ examples=examples).launch()