# doc_app/app.py
import os
import streamlit as st
import torch  # the transformers pipeline below uses the PyTorch backend
from transformers import pipeline
import speech_recognition as sr
from gtts import gTTS
from io import BytesIO
# Set your Hugging Face API token (replace the placeholder below with your own)
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "your_hugging_face_api_key"
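# Hard-coding a token in source is risky; on Hugging Face Spaces it is more
# commonly read from a configured secret. A minimal sketch (the secret name
# "HF_TOKEN" is an assumption, not part of the original script):
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HF_TOKEN", "")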
# Load the Hugging Face model using text-generation
chatbot = pipeline("text-generation", model="thrishala/mental_health_chatbot")
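# Loading the model on every Streamlit rerun is slow; a cached loader is a
# common pattern (a sketch using st.cache_resource; not part of the original
# script, so it is left commented out):
# @st.cache_resource
# def load_chatbot():
#     return pipeline("text-generation", model="thrishala/mental_health_chatbot")
# chatbot = load_chatbot()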
# Function to get voice input using Whisper
# (needs the PyAudio and openai-whisper packages and a local microphone;
# capture happens on the machine executing the script)
def get_voice_input():
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        st.write("Listening...")
        audio = recognizer.listen(source)
    st.write("Recognizing...")
    try:
        text = recognizer.recognize_whisper(audio)
        return text
    except sr.UnknownValueError:
        st.error("Sorry, I could not understand the audio.")
        return None
    except sr.RequestError as e:
        st.error(f"Could not request results; {e}")
        return None
# Function to generate voice response using gTTS
def speak(text):
    tts = gTTS(text=text, lang='en')
    audio_file = BytesIO()
    # gTTS.save() expects a filename, so write to the in-memory buffer instead
    tts.write_to_fp(audio_file)
    audio_file.seek(0)
    return audio_file
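# The "text-generation" pipeline returns the prompt followed by the model's
# continuation, so the raw output echoes the user's message back. This small
# helper (our addition, not part of the original script) strips that echo:
def strip_prompt(prompt, generated):
    # Drop the leading prompt if the model repeated it verbatim
    if generated.startswith(prompt):
        return generated[len(prompt):].strip()
    return generated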
# Streamlit app layout
st.title("Mental Health Chatbot")
st.write("Talk to your mental health assistant!")
# Voice input button
if st.button("Speak"):
    user_input = get_voice_input()
    if user_input:
        st.write(f"You: {user_input}")
        # Get response from the chatbot and strip the echoed prompt
        result = chatbot(user_input, max_length=150, num_return_sequences=1)
        response = strip_prompt(user_input, result[0]['generated_text'])
        st.write(f"Bot: {response}")
        # Generate voice response
        audio_output = speak(response)
        st.audio(audio_output, format="audio/mp3")
# Text input
user_input = st.text_input("Type your message:")
if st.button("Send"):
    if user_input:
        st.write(f"You: {user_input}")
        # Get response from the chatbot and strip the echoed prompt
        result = chatbot(user_input, max_length=150, num_return_sequences=1)
        response = strip_prompt(user_input, result[0]['generated_text'])
        st.write(f"Bot: {response}")
        # Generate voice response
        audio_output = speak(response)
        st.audio(audio_output, format="audio/mp3")
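# To run locally (assumed dependencies, not pinned by the original script):
#   pip install streamlit transformers torch SpeechRecognition openai-whisper gTTS pyaudio
#   streamlit run app.py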