silvesterjk committed
Commit ec24f9c • 1 Parent(s): 3f8b928

Update app.py

Files changed (1):
  1. app.py +23 -7
app.py CHANGED
@@ -3,11 +3,10 @@ os.system("pip install git+https://github.com/openai/whisper.git")
 import gradio as gr
 import whisper
 
-#from share_btn import community_icon_html, loading_icon_html, share_js
+from share_btn import community_icon_html, loading_icon_html, share_js
 
 model = whisper.load_model("medium.en")
 
-
 
 def inference(audio):
     audio = whisper.load_audio(audio)
@@ -98,6 +97,15 @@ css = """
         transform: rotate(360deg);
     }
 }
+#share-btn-container {
+    display: flex; margin-top: 1.5rem !important; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+}
+#share-btn {
+    all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
+}
+#share-btn * {
+    all: unset;
+}
 """
 
 block = gr.Blocks(css=css)
@@ -150,11 +158,11 @@ with block:
                 <rect x="23" y="69" width="23" height="23" fill="black"></rect>
               </svg>
               <h1 style="font-weight: 900; margin-bottom: 7px;">
-                Talking Yak Speech Engine
+                Whisper
              </h1>
            </div>
            <p style="margin-bottom: 10px; font-size: 94%">
-              To turn the microphone off: Tap the stop button
+              Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. This demo cuts audio after around 30 secs.
            </p>
          </div>
        """
@@ -171,12 +179,20 @@ with block:
 
         btn = gr.Button("Transcribe")
         text = gr.Textbox(show_label=False, elem_id="result-textarea")
-
-        btn.click(inference, inputs=[audio], outputs=[text])
+        with gr.Group(elem_id="share-btn-container"):
+            community_icon = gr.HTML(community_icon_html, visible=False)
+            loading_icon = gr.HTML(loading_icon_html, visible=False)
+            share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
+
+
+
+
+        btn.click(inference, inputs=[audio], outputs=[text, community_icon, loading_icon, share_button])
+        share_button.click(None, [], [], _js=share_js)
 
         gr.HTML('''
         <div class="footer">
-                    🤗 Hugging Face
+                    <p>Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> - Gradio Demo by 🤗 Hugging Face
         </p>
         </div>
         ''')
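
Note on the new wiring: btn.click now lists four output components (text, community_icon, loading_icon, share_button), so inference has to return four values instead of a single transcript string. The diff only shows the first lines of inference, so the following is a minimal sketch of a matching return shape, assuming the rest of the function follows the upstream openai/whisper Gradio demo (pad/trim to roughly 30 seconds, log-mel spectrogram, whisper.decode) and uses gr.update(visible=True) to reveal the hidden share widgets once a result exists:

# Sketch only: the body of inference() is assumed from the upstream Whisper demo,
# not taken from this commit; what matters here is the four-value return.
import gradio as gr
import whisper

model = whisper.load_model("medium.en")

def inference(audio):
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)  # keeps ~30 s of audio, matching the header text
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    result = whisper.decode(model, mel, whisper.DecodingOptions(fp16=False))
    # One value per output component: text, community_icon, loading_icon, share_button
    return result.text, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)

The share button itself is wired with share_button.click(None, [], [], _js=share_js): the Python callback is None, so share_js from share_btn.py runs entirely in the browser, the usual pattern for the "Share to community" button in Gradio 3.x demos.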