import streamlit as st
from transformers import pipeline
from io import BytesIO
import soundfile as sf  # encodes the raw waveform as WAV bytes for playback

# Load Hugging Face pipelines for question answering and text-to-speech.
# st.cache_resource keeps them loaded across Streamlit reruns.
@st.cache_resource
def load_qa_pipeline():
    return pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")

@st.cache_resource
def load_tts_pipeline():
    # The original facebook/fastspeech2-en-ljspeech checkpoint is a fairseq model that the
    # transformers pipeline cannot load; facebook/mms-tts-eng (VITS) is a supported substitute.
    return pipeline("text-to-speech", model="facebook/mms-tts-eng")

qa_pipeline = load_qa_pipeline()
tts_pipeline = load_tts_pipeline()

# Streamlit interface
st.title("Virtual Assistant")
st.write("Ask me anything!")

# User query
user_query = st.text_input("Type your question here:")

if user_query:
    # Generate an answer with the extractive QA model
    context = "This is the context of the assistant. The assistant will answer general knowledge questions."  # Customize the context for better QA accuracy
    qa_result = qa_pipeline(question=user_query, context=context)
    answer = qa_result["answer"]

    # Display the answer as text
    st.write(f"Answer: {answer}")

    # Convert the answer to audio with the TTS model; the pipeline returns a dict
    # containing the raw waveform and its sampling rate.
    tts_output = tts_pipeline(answer)
    waveform = tts_output["audio"].squeeze()
    sampling_rate = tts_output["sampling_rate"]

    # Write the waveform to an in-memory WAV file and play it with Streamlit's audio player
    audio_bytes = BytesIO()
    sf.write(audio_bytes, waveform, sampling_rate, format="WAV")
    audio_bytes.seek(0)
    st.audio(audio_bytes, format="audio/wav")
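
To try the assistant locally, save the script under a name of your choice (app.py is assumed here) and install streamlit, transformers, torch, and soundfile; then launch it with:

streamlit run app.py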