RamAnanth1 committed
Commit 3668ac8 · Parent: 1d3dc52

Test upload

Files changed (1)
app.py  +15 -4
app.py CHANGED
@@ -23,8 +23,16 @@ def _read_file(filename, chunk_size=5242880):
             if not data:
                 break
             yield data
-
-def get_transcript_url(url):
+
+def get_audio_from_upload(audio):
+    upload_response = requests.post(
+        upload_endpoint,
+        headers=headers,
+        data=_read_file(filename))
+    print(upload_response.json())
+    return
+
+def get_transcript_url(url):
 
     # JSON that tells the API which file to trancsribe
     json={
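Note on the new helper: get_audio_from_upload streams the file to the upload endpoint through _read_file, but it passes filename, which is not defined inside the function; only the audio argument is in scope. A minimal sketch of what this step presumably intends, assuming audio arrives as a local file path and using AssemblyAI's standard upload endpoint and authorization header (neither of which is shown in this diff), would be:

import os
import requests

# Assumed module-level values; app.py defines its own upload_endpoint and
# headers elsewhere, outside the hunks shown above.
upload_endpoint = "https://api.assemblyai.com/v2/upload"
headers = {"authorization": os.environ.get("ASSEMBLYAI_API_KEY", "")}

def _read_file(filename, chunk_size=5242880):
    # Chunked reader matching the generator pattern visible in the hunk above.
    with open(filename, "rb") as f:
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            yield data

def get_audio_from_upload(audio):
    # `audio` is assumed to be a local file path; the committed version
    # passes `filename`, which this function never receives.
    upload_response = requests.post(
        upload_endpoint,
        headers=headers,
        data=_read_file(audio))
    result = upload_response.json()
    print(result)
    # The upload response contains the hosted file's URL.
    return result.get("upload_url")

Returning the upload URL rather than None would also let a later transcription step reuse it directly.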
@@ -190,7 +198,9 @@ with gr.Blocks() as demo:
 
     inputs = gr.Textbox(label = "Enter the url for the audio file")
     #audio_intelligence_options = gr.CheckboxGroup(audio_intelligence_list, label="Audio Intelligence Options")
-    b1 = gr.Button('Transcribe')
+    audio_input = gr.Audio("Input Audio")
+    b1 = gr.Button('Process Audio')
+    b2 = gr.Button("Upload Audio")
 
     with gr.Tabs():
         with gr.TabItem('Transcript') as transcript_tab:
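Note on the new components: in Gradio 3.x the first positional argument of gr.Audio is the initial value rather than the label, so gr.Audio("Input Audio") sets a default file path instead of a caption. A hedged sketch of the likely intent, with the label passed by keyword and type="filepath" so the click handler receives a path it can stream (both keyword choices are assumptions, not part of this commit):

import gradio as gr

with gr.Blocks() as demo:
    inputs = gr.Textbox(label = "Enter the url for the audio file")
    # Label passed by keyword; type="filepath" hands the handler a local path.
    audio_input = gr.Audio(label="Input Audio", type="filepath")
    b1 = gr.Button('Process Audio')
    b2 = gr.Button("Upload Audio")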
@@ -205,7 +215,8 @@ with gr.Blocks() as demo:
 
     inputs.submit(get_transcript_url, [inputs], [transcript, summary, sentiment_analysis, topic_detection])
     b1.click(get_transcript_url, [inputs], [transcript, summary, sentiment_analysis,topic_detection])
-
+    b2.click(get_audio_from_upload, audio_input)
+
     examples = gr.Examples(examples = [["https://github.com/AssemblyAI-Examples/assemblyai-and-python-in-5-minutes/blob/main/audio.mp3?raw=true"]], inputs = inputs, outputs=[transcript, summary, sentiment_analysis, topic_detection], cache_examples = True, fn = get_transcript_url)
 
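Finally, the wiring: Button.click accepts a single component (or a list) as inputs, and since no outputs are passed here the upload response is only printed on the server. A small self-contained sketch of how the result could be surfaced in the UI; the upload_url_box component and the stub handler are hypothetical, not part of this commit:

import gradio as gr

def get_audio_from_upload(audio):
    # Stub standing in for the upload helper sketched earlier.
    return f"would upload: {audio}"

with gr.Blocks() as demo:
    audio_input = gr.Audio(label="Input Audio", type="filepath")
    b2 = gr.Button("Upload Audio")
    upload_url_box = gr.Textbox(label="Uploaded file URL")  # hypothetical output
    # Routing the handler's return value to a component makes the upload URL
    # visible in the app instead of only in the server log.
    b2.click(get_audio_from_upload, inputs=audio_input, outputs=upload_url_box)

demo.launch()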