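# Streamlit demo: answer a question about an uploaded image (visual question
# answering) and read the answer aloud with SpeechT5 text-to-speech.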
import os

# Install runtime dependencies at startup (a workaround for hosted environments
# that do not install requirements.txt automatically).
os.system('pip install -r requirements.txt')

import streamlit as st
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline
from datasets import load_dataset
import torch
import soundfile as sf
from PIL import Image
import io

# Generate speech for the given text with SpeechT5 and save it as a 16 kHz WAV file.
def generate_speech(text, model, processor, vocoder, speaker_embeddings):
    inputs = processor(text=text, return_tensors="pt")
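    # SpeechT5 predicts a mel spectrogram; the HiFi-GAN vocoder turns it into a waveform.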
    generated_speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    sf.write("speech.wav", generated_speech.numpy(), samplerate=16000)
    return "speech.wav"

# Initialize the models and processor. Streamlit re-runs this script on every
# interaction, so cache the heavy objects instead of reloading them each time.
@st.cache_resource
def load_models():
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    # Index 7306 corresponds to the "slt" (US English, female) speaker.
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
    vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    return processor, model, vocoder, speaker_embeddings, vqa_pipeline

processor, model, vocoder, speaker_embeddings, vqa_pipeline = load_models()

# Streamlit UI
st.title('Visual Question Answering and Text-to-Speech App')

uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png"])
default_question = "How many people are in this image, and what is its main color?"
user_question = st.text_input("Enter your question", value=default_question)

if st.button("Answer and Generate Speech"):
    if uploaded_image:
        # Decode the upload into an RGB PIL image for the VQA pipeline.
        image = Image.open(io.BytesIO(uploaded_image.getvalue())).convert("RGB")
        # The VQA pipeline (ViLT fine-tuned on VQAv2) was loaded once in load_models above.
        vqa_result = vqa_pipeline({"image": image, "question": user_question})
        answer = vqa_result[0]['answer']  # results are sorted by score, so take the top answer

        display_text = f"Question: {user_question} Answer: {answer}"
        st.write(display_text)  # Display the answer
        
        # Generate and play speech
        audio_path = generate_speech(display_text, model, processor, vocoder, speaker_embeddings)
        with open(audio_path, 'rb') as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes, format="audio/wav")
    else:
        st.write("Please upload an image and enter a question.")