monster119120 committed on
Commit 0750794 · verified · 1 parent: 461ee57

Update app.py

Files changed (1): app.py (+42 -39)
app.py CHANGED
@@ -1,43 +1,46 @@
  import os
- os.system("pip install scipy")
- # os.system('pip install -r requirements.txt')

- from PIL import Image
- import io
  import streamlit as st
  from transformers import pipeline
- import scipy
-
- # Initialize the visual question answering and text-to-speech pipelines
- vqa_pipeline = pipeline("visual-question-answering", model="microsoft/git-base-vqav2")
- tts_pipeline = pipeline("text-to-speech", "suno/bark")
-
- def main():
-     st.title("Visual Question Answering & Text-to-Audio App")
-
-     image = st.file_uploader("Upload an image", type=["jpg", "png"])
-     question = st.text_input("Enter your question")
-
-     if st.button('Run Visual Question Answering'):
-         if image and question:
-             # Convert the uploaded file into a PIL image
-             image = Image.open(io.BytesIO(image.getvalue()))
-
-             # Run visual question answering on the uploaded image and question
-             vqa_result = vqa_pipeline({"image": image, "question": question})
-
-             # Assumes vqa_result is a list of dicts, each with an 'answer' key
-             if vqa_result:  # make sure vqa_result is not empty
-                 answer = vqa_result[0]['answer']  # get the answer
-                 st.write(f"Answer: {answer}")  # display the answer
-
-                 # Convert the answer to audio and play it
-                 speech = tts_pipeline(answer, forward_params={"do_sample": True})
-                 scipy.io.wavfile.write("bark_out.wav", rate=speech["sampling_rate"], data=speech["audio"])
-
-                 st.audio("bark_out.wav", format="audio/wav")
-         else:
-             st.write("Please input an image and a question first.")
-
- if __name__ == "__main__":
-     main()
 
 
  import os
+ # os.system("pip install scipy")
+ os.system('pip install -r requirements.txt')

  import streamlit as st
+ from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
+ from datasets import load_dataset
+ import torch
+ import soundfile as sf
  from transformers import pipeline
+ from PIL import Image
+ import io
+
+ st.title('Image to text and then text to speech app')
+
+
+ image = st.file_uploader("Upload an image", type=["jpg", "png"])
+
+ question = st.text_input(
+     label="Enter your question",
+     value="How many people and what is the color of this image?"
+ )
+
+ def generate_speech(text):
+     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
+     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+     inputs = processor(text=text, return_tensors="pt")
+     # Speaker embedding (x-vector) that SpeechT5 needs to select a voice
+     embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+     speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
+     speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
+
+     sf.write("speech.wav", speech.numpy(), samplerate=16000)
+
+ if st.button("Generate"):
+     image = Image.open(io.BytesIO(image.getvalue()))
+     vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
+     vqa_result = vqa_pipeline({"image": image, "question": question})
+     answer = vqa_result[0]['answer']
+     st.write(f"Question: {question} Answer: {answer}")  # display the answer
+     generate_speech(f"Question: {question}, Answer: {answer}")
+     audio_file = open("speech.wav", 'rb')
+     audio_bytes = audio_file.read()
+     st.audio(audio_bytes, format="audio/wav")
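
Note that the new version drops the old image/question guard: clicking Generate before an image is uploaded makes image.getvalue() fail because the uploader returns None. A minimal sketch of a guarded handler, reusing the names (image, question, generate_speech) defined in app.py above, with the fallback message borrowed from the old version:

if st.button("Generate"):
    if image is None or not question:
        # Fail gracefully instead of crashing on a missing upload
        st.write("Please input an image and a question first.")
    else:
        pil_image = Image.open(io.BytesIO(image.getvalue()))
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        # The VQA pipeline returns a list of {"answer": ..., "score": ...} dicts
        vqa_result = vqa_pipeline({"image": pil_image, "question": question})
        if vqa_result:
            answer = vqa_result[0]["answer"]
            st.write(f"Question: {question} Answer: {answer}")
            generate_speech(f"Question: {question}, Answer: {answer}")
            with open("speech.wav", "rb") as audio_file:
                st.audio(audio_file.read(), format="audio/wav")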
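The commit also flips the install line from a one-off "pip install scipy" to os.system('pip install -r requirements.txt'), but requirements.txt itself is not part of this commit. Judging from the new imports, it presumably lists something like the following (assumed contents, versions unpinned):

# requirements.txt -- assumed contents inferred from app.py's imports; not part of this commit
streamlit
transformers
datasets
torch
soundfile
sentencepiece  # needed by the SpeechT5 tokenizer
Pillow

On Hugging Face Spaces the platform installs requirements.txt automatically at build time, so the os.system call is arguably redundant there.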