# Import libraries
import whisper
import os
import gradio as gr
from groq import Groq
from gtts import gTTS
import traceback

# Step 1: Load Whisper Model for Transcription
try:
    model = whisper.load_model("base")
except Exception as e:
    print("Error loading Whisper model:", e)
    model = None

# Step 2: Initialize Groq Client
try:
    client = Groq(api_key=os.getenv("MY_API_KEY"))
except Exception as e:
    print("Error initializing Groq client:", e)
    client = None

# Function to get response from Groq API using LLaMA model
def get_response_from_groq(user_input):
    try:
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": user_input}],
            model="llama3-8b-8192",
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        print("Error getting response from Groq API:", e)
        return "Sorry, I couldn't generate a response at this time."

# Step 3: Function to convert text to speech using gTTS
def text_to_speech(text):
    try:
        tts = gTTS(text)
        tts.save("response.mp3")
        return "response.mp3"
    except Exception as e:
        print("Error converting text to speech:", e)
        return None

# Step 4: Define the Chatbot Function for Gradio
def chatbot(audio_input):
    try:
        if model is None:
            return "Whisper model not loaded.", "Unable to transcribe.", None

        # Transcribe audio input using Whisper
        transcription = model.transcribe(audio_input)["text"]

        # Get response from Groq API using LLaMA model
        response = get_response_from_groq(transcription)

        # Convert response to speech
        speech_file = text_to_speech(response)

        return transcription, response, speech_file
    except Exception as e:
        print("Error in chatbot function:", e)
        traceback.print_exc()
        return "Error occurred during processing.", "Please try again.", None

# Step 5: Create Gradio Interface
try:
    gr.Interface(
        fn=chatbot,
        inputs=gr.Audio(type="filepath"),
        outputs=["text", "text", "audio"],
        live=True
    ).launch()
except Exception as e:
    print("Error launching Gradio interface:", e)
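
# Usage note (assumed setup; adjust package names and commands to your environment):
#   pip install openai-whisper gradio groq gTTS
#   Whisper also expects the ffmpeg binary to be available on the system PATH.
#   The Groq API key is read from the MY_API_KEY environment variable referenced above,
#   e.g. export MY_API_KEY="your-groq-api-key" before starting the script.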