import streamlit as st
from transformers import pipeline
from io import BytesIO
import soundfile as sf  # needed to encode the TTS waveform as WAV for st.audio

# Load Hugging Face models for question-answering and text-to-speech
@st.cache_resource
def load_qa_pipeline():
    return pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")

@st.cache_resource
def load_tts_pipeline():
    return pipeline("text-to-speech", model="facebook/fastspeech2-en-ljspeech")

qa_pipeline = load_qa_pipeline()
tts_pipeline = load_tts_pipeline()

# Streamlit interface
st.title("Virtual Assistant")
st.write("Ask me anything!")

# User query
user_query = st.text_input("Type your question here:")

if user_query:
    # Generate an answer with the extractive QA model. Extractive QA can only
    # copy spans out of the context it is given, so replace this placeholder
    # with real reference text for meaningful answers.
    context = (
        "This is the context of the assistant. "
        "The assistant will answer general knowledge questions."
    )
    qa_result = qa_pipeline({"question": user_query, "context": context})
    answer = qa_result['answer']
    
    # Display answer as text
    st.write(f"Answer: {answer}")
    
    # Convert the answer to speech; the TTS pipeline returns a dict with the
    # raw waveform ("audio") and its sampling rate ("sampling_rate")
    tts_result = tts_pipeline(answer)
    waveform = tts_result["audio"].squeeze()  # drop the leading batch/channel axis

    # Encode the waveform as an in-memory WAV file and play it in the browser
    audio_bytes = BytesIO()
    sf.write(audio_bytes, waveform, tts_result["sampling_rate"], format="WAV")
    audio_bytes.seek(0)
    st.audio(audio_bytes, format="audio/wav")
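
# To try the assistant locally (a sketch, assuming this script is saved as
# app.py and the packages it imports are installed):
#
#   pip install streamlit transformers torch soundfile
#   streamlit run app.py
#
# On the first run the two checkpoints are downloaded from the Hugging Face
# Hub, so startup takes noticeably longer than subsequent cached reruns.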