import gradio as gr
import openai
import os
import json
from gtts import gTTS  # gTTS for text-to-speech

# Groq API setup via the OpenAI-compatible endpoint (uses the pre-1.0 openai interface)
openai.api_key = os.getenv("GROQ_API_KEY")
openai.api_base = "https://api.groq.com/openai/v1"

# File to store conversation history
CONVERSATION_FILE = "conversation_history.json"

# Function to load conversation history
def load_history():
    if not os.path.exists(CONVERSATION_FILE):
        # Create the file with an empty list as default content
        with open(CONVERSATION_FILE, "w") as file:
            json.dump([], file)
    try:
        with open(CONVERSATION_FILE, "r") as file:
            return json.load(file)
    except json.JSONDecodeError:
        # Corrupted or empty file: fall back to an empty history
        return []

# Function to save conversation history
def save_history(history):
    try:
        with open(CONVERSATION_FILE, "w") as file:
            json.dump(history, file, indent=4)
    except Exception as e:
        print(f"Error saving history: {e}")

# Function to clear conversation history
def clear_conversation_history():
    try:
        with open(CONVERSATION_FILE, "w") as file:
            json.dump([], file)
        return "Conversation history cleared successfully."
    except Exception as e:
        return f"Error clearing history: {e}"

# Function to get a response from the LLM (Groq's OpenAI-compatible endpoint)
def get_groq_response(message, history=None):
    history = history or []  # Avoid a mutable default argument
    try:
        messages = (
            [{"role": "system", "content": "Precise answer"}]
            + history
            + [{"role": "user", "content": message}]
        )
        response = openai.ChatCompletion.create(
            model="llama-3.3-70b-versatile",
            messages=messages,
        )
        return response.choices[0].message["content"]
    except Exception as e:
        return f"Error: {str(e)}"

# Text-to-speech function
def text_to_speech(latest_response):
    try:
        if not latest_response:  # Nothing to speak yet
            return None
        tts = gTTS(latest_response, lang="en")  # Generate speech from text
        audio_file = "response_audio.mp3"
        tts.save(audio_file)
        return audio_file
    except Exception as e:
        print(f"Error generating audio: {e}")
        return None

# Chatbot function
def chatbot(user_input, history):
    # Load conversation history (prefer the in-session state, else the saved file)
    conversation_history = history or load_history()

    # Format history for the LLM: interleave user/assistant turns in order
    formatted_history = []
    for user_msg, bot_msg in conversation_history:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})

    # Get bot response
    bot_response = get_groq_response(user_input, formatted_history)

    # Update history with the new exchange and persist it
    conversation_history.append((user_input, bot_response))
    save_history(conversation_history)

    return conversation_history, conversation_history, ""  # Clear the user input field

# Gradio Interface with enhanced UI/UX
with gr.Blocks(css="""
    .gradio-container {
        font-family: 'Arial', sans-serif;
        background-color: #F2EFE7;
        padding: 20px;
        height: 100%;
    }
    .gr-chatbot {
        background-color: #FFFFFF;
        border-radius: 10px;
        padding: 20px;
        max-height: 600px;
        overflow-y: auto;
        box-shadow: 0px 0px 15px rgba(0, 0, 0, 0.1);
        scroll-behavior: smooth;
    }
    .user-message, .bot-message {
        border-radius: 8px;
        margin: 10px 0;
        max-width: 60%;
        padding: 12px;
    }
    .user-message {
        background-color: #9ACBD0;
        color: #FFF;
        text-align: right;
        float: right;
        clear: both;
    }
    .bot-message {
        background-color: #48A6A7;
        color: #FFF;
        text-align: left;
        float: left;
        clear: both;
    }
""") as demo:
    gr.Markdown("# ChatGPT at Home\nAsk me anything and hear the response!")

    # Chatbot UI
    chatbot_ui = gr.Chatbot()
    user_input = gr.Textbox(label="Type your message here:", placeholder="Ask me anything...", lines=1)
    hear_button = gr.Button("Hear Response")
    audio_output = gr.Audio(label="Bot's Voice", type="filepath", interactive=False)
    clear_button = gr.Button("Clear History")
    system_message = gr.Textbox(label="System Message", interactive=False)
    history_state = gr.State(load_history())

    # Chat interaction
    user_input.submit(chatbot, inputs=[user_input, history_state], outputs=[chatbot_ui, history_state, user_input])

    # Speak the latest bot response
    hear_button.click(
        lambda latest: text_to_speech(latest[-1][1] if latest else "No response yet."),  # Handle empty state
        inputs=[history_state],
        outputs=audio_output,
    )

    # Clear history button actions
    clear_button.click(clear_conversation_history, inputs=None, outputs=system_message)
    clear_button.click(lambda: [], outputs=chatbot_ui)  # Clear the chatbot UI
    clear_button.click(lambda: [], outputs=history_state)  # Reset the history state

# Launch the app
demo.launch()
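
# Note (assumption, not stated in the original file): this app expects GROQ_API_KEY to be
# set in the Space's environment, and the ChatCompletion/api_base calls above rely on the
# legacy openai package interface (pre-1.0); with openai>=1.0 the code would need to be
# migrated to the newer OpenAI(base_url=..., api_key=...) client.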