yuangongfdu committed on
Commit
05c3eb4
·
1 Parent(s): 4751871

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import gradio as gr
3
+ import requests
4
+ import os
5
+
6
+ def is_file_larger_than_30mb(file_path):
7
+ try:
8
+ file_size = os.path.getsize(file_path)
9
+ return file_size > (30 * 1024 * 1024)
10
+ except FileNotFoundError:
11
+ return False
12
+ except PermissionError:
13
+ return False
14
+ except Exception as e:
15
+ return False
16
+
17
+ def upload_audio(audio_path):
18
+ try:
19
+ size = is_file_larger_than_30mb(audio_path)
20
+ if size == True:
21
+ return 'size'
22
+ with open(audio_path, 'rb') as audio_file:
23
+ response = requests.post('http://sls-titan-6.csail.mit.edu:8080/upload/', files={'audio_file': audio_file})
24
+ if response.status_code == 200:
25
+ return response.json()["path"]
26
+ except:
27
+ return None
28
+
29
+ def predict(audio_path, question):
30
+ upload_statues = upload_audio(audio_path)
31
+ if upload_statues == None:
32
+ return 'Please upload an audio file.'
33
+ if upload_statues == 'size':
34
+ return 'This demo does not support audio file size larger than 30MB.'
35
+ if question == '':
36
+ return 'Please ask a question.'
37
+ print(audio_path, question)
38
+ response = requests.put('http://sls-titan-6.csail.mit.edu:8080/items/0', json={
39
+ 'audio_path': audio_path, 'question': question
40
+ })
41
+ answer = json.loads(response.content)
42
+ ans_str = answer['output']
43
+ return ans_str
44
+
45
if __name__ == '__main__':
    # Build and launch the Gradio demo. The model runs on a remote server;
    # this app only forwards requests (see predict()).
    # NOTE(review): the original defined six link/label variables here
    # (GitHub, paper, sample audios) that were never referenced — removed
    # as dead code; restore them if they are meant to appear in the UI.
    demo = gr.Interface(
        fn=predict,
        inputs=[
            gr.Audio(type="filepath"),
            gr.Textbox(
                value='What can be inferred from the spoken text and sounds? Why?',
                label='Edit the textbox to ask your own questions!',
            ),
        ],
        outputs=[gr.Textbox(label="LTU Output")],
        cache_examples=True,
        title="Demo of LTU-2 Beta",
        description=(
            "LTU-2 an improved version of LTU. LTU-2 is stronger in spoken text understanding and music understanding. <br>"
            "LTU is authored by Yuan Gong, Alexander H. Liu, Hongyin Luo, Leonid Karlinsky, and James Glass (MIT & MIT-IBM Watson AI Lab). <br>"
            "**Please note that the model is under construction and may be buggy. It is trained with some new techniques that are not described in LTU paper. I.e., using method described in LTU paper cannot reproduce this model.**<br>"
            "Input should be wav file sampled at 16kHz. This demo trim input audio to 10 seconds. <br>"
            "**Research Demo, No Commercial Use (Due to license of LLaMA).**"
        ),
    )
    demo.launch(debug=False, share=False)