Spaces:
Sleeping
Sleeping
File size: 5,968 Bytes
7a3b561 961032c 7a3b561 f17ef40 7a3b561 da65e6e 7a3b561 f17ef40 2ea6c40 7a3b561 f17ef40 46facc0 7a3b561 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 |
import gradio as gr
import openai
import os
import json
# OpenAI API setup.
# NOTE(review): this uses the legacy openai<1.0 module-level configuration
# (openai.api_key / openai.api_base) together with ChatCompletion below —
# the key comes from the GROQ_API_KEY env var and requests are routed to
# Groq's OpenAI-compatible endpoint, not to OpenAI itself.
openai.api_key = os.getenv("GROQ_API_KEY")
openai.api_base = "https://api.groq.com/openai/v1"
# JSON file used to persist conversation history across app restarts.
CONVERSATION_FILE = "conversation_history.json"
# Function to load conversation history
def load_history():
    """Load the persisted conversation history.

    Returns:
        list: The history stored in ``CONVERSATION_FILE``, or an empty
        list if the file is missing or contains invalid JSON.

    Side effects:
        Creates ``CONVERSATION_FILE`` seeded with ``[]`` if it does not
        already exist.
    """
    if not os.path.exists(CONVERSATION_FILE):
        # Seed the store with an empty list. We just wrote it, so there is
        # no need to reopen and re-parse the file (the old code did both).
        with open(CONVERSATION_FILE, "w") as file:
            json.dump([], file)
        return []
    try:
        with open(CONVERSATION_FILE, "r") as file:
            return json.load(file)
    except json.JSONDecodeError:
        # Corrupt or partially-written file: fall back to an empty history.
        return []
# Function to save conversation history
def save_history(history):
    """Persist *history* to ``CONVERSATION_FILE`` as indented JSON.

    Any failure is printed rather than raised, so a disk problem can
    never interrupt the chat flow.
    """
    try:
        payload = json.dumps(history, indent=4)
        with open(CONVERSATION_FILE, "w") as file:
            file.write(payload)
    except Exception as e:
        print(f"Error saving history: {e}")
# Function to clear conversation history
def clear_conversation_history():
    """Reset ``CONVERSATION_FILE`` to an empty list.

    Returns a human-readable status string in both the success and the
    failure case; it never raises, so it is safe to wire directly to a
    UI button.
    """
    try:
        with open(CONVERSATION_FILE, "w") as file:
            file.write(json.dumps([]))
    except Exception as e:
        return f"Error clearing history: {e}"
    return "Conversation history cleared successfully."
# Function to get response from the LLM
def get_groq_response(message, history=None):
    """Send *message* (plus optional prior turns) to the Groq-hosted LLM.

    Args:
        message: The user's latest input text.
        history: Optional list of ``{"role", "content"}`` dicts with the
            prior conversation turns. Defaults to no history.

    Returns:
        str: The model's reply text, or an ``"Error: ..."`` string when
        the request fails — the UI displays the message instead of
        crashing.
    """
    # FIX: the original used a mutable default argument (history=[]),
    # which is shared across calls. Use None as the sentinel instead;
    # passing [] explicitly still behaves identically.
    turns = history or []
    try:
        messages = (
            [{"role": "system", "content": "Precise answer"}]
            + turns
            + [{"role": "user", "content": message}]
        )
        response = openai.ChatCompletion.create(
            model="llama-3.3-70b-versatile",
            messages=messages,
        )
        return response.choices[0].message["content"]
    except Exception as e:
        return f"Error: {str(e)}"
# Text-to-Speech function
def text_to_speech(latest_response):
    """Synthesize *latest_response* to an MP3 file using Google TTS.

    Args:
        latest_response: The text to speak.

    Returns:
        str | None: Path to the saved MP3 (``response_audio.mp3``), or
        None when synthesis fails.
    """
    try:
        # BUG FIX: gTTS was referenced but never imported anywhere in the
        # file, so this function always failed with a NameError. Import it
        # locally so the rest of the app still loads even if the gtts
        # package is not installed.
        from gtts import gTTS

        tts = gTTS(latest_response, lang="en")  # Generate speech from text
        audio_file = "response_audio.mp3"
        tts.save(audio_file)
        print(f"Audio file saved at: {audio_file}")  # Debugging print
        return audio_file  # Ensure correct file path is returned
    except Exception as e:
        print(f"Error generating audio: {e}")
        return None
# Chatbot function
def chatbot(user_input, history):
    """Handle one chat turn: query the LLM, then update and persist history.

    Args:
        user_input: The text the user just submitted.
        history: Current list of (user_message, bot_response) pairs from
            the Gradio State, or a falsy value to load it from disk.

    Returns:
        tuple: ``(history, history, "")`` — the updated pair list twice
        (once for the Chatbot widget, once for the State) plus an empty
        string to clear the input textbox.
    """
    # Load conversation history from disk if the State is empty.
    conversation_history = history or load_history()
    # BUG FIX: the original assigned user/assistant roles by pair index over
    # the user messages only, then appended ALL assistant replies after ALL
    # user messages, scrambling the chronological order sent to the LLM.
    # Interleave each (user, assistant) pair in order instead.
    formatted_history = []
    for user_msg, bot_msg in conversation_history:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})
    # Get bot response for the new message.
    bot_response = get_groq_response(user_input, formatted_history)
    # Record the new turn and persist it.
    conversation_history.append((user_input, bot_response))
    save_history(conversation_history)
    return conversation_history, conversation_history, ""  # Clear the user input field
# Gradio Interface with enhanced UI/UX
# Gradio UI: a Blocks layout with custom CSS for chat-bubble styling.
# NOTE(review): the CSS below is a runtime string passed to gr.Blocks and
# must not be altered; class names like .gr-chatbot/.gr-button target
# Gradio's generated markup — verify they match the installed Gradio version.
with gr.Blocks(css="""
.gradio-container {
    font-family: 'Arial', sans-serif;
    background-color: #F2EFE7;
    padding: 20px;
    height: 100%;
}
.gr-chatbot {
    background-color: #FFFFFF;
    border-radius: 10px;
    padding: 20px;
    max-height: 600px; /* Increased height */
    overflow-y: auto;
    box-shadow: 0px 0px 15px rgba(0, 0, 0, 0.1);
    scroll-behavior: smooth; /* Smooth scrolling */
}
.user-message {
    background-color: #9ACBD0;
    color: #FFF;
    padding: 12px;
    border-radius: 8px;
    margin: 10px 0;
    max-width: 60%;
    text-align: right;
    float: right;
    clear: both;
    transition: transform 0.3s ease;
}
.bot-message {
    background-color: #48A6A7;
    color: #FFF;
    padding: 12px;
    border-radius: 8px;
    margin: 10px 0;
    max-width: 60%;
    text-align: left;
    float: left;
    clear: both;
    transition: transform 0.3s ease;
}
.user-message:hover, .bot-message:hover {
    transform: scale(1.05);
    box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.1);
}
.gr-button {
    background-color: #2973B2;
    color: white;
    padding: 10px 15px;
    border-radius: 8px;
    border: none;
    transition: background-color 0.3s ease;
}
.gr-button:hover {
    background-color: #21689D;
}
.gr-textbox input {
    padding: 15px;
    font-size: 16px;
}
.gr-markdown h1 {
    color: #3A5A6E;
    font-size: 28px;
    text-align: center;
}
""") as demo:
    gr.Markdown("""# Mom: We have ChatGPT at Home, \n ChatGPT at Home: Feel free to ask questions. After you're done, remember to clear the history for privacy. """)
    # --- Components ---
    chatbot_ui = gr.Chatbot()  # Renders the (user, bot) pair list
    user_input = gr.Textbox(label="Type your message here:", placeholder="Ask me anything...", lines=1)
    hear_button = gr.Button("Hear Response")
    audio_output = gr.Audio(label="Bot's Voice", type="filepath", interactive=False)
    clear_button = gr.Button("Clear History")
    system_message = gr.Textbox(label="System Message", interactive=False)
    # Per-session state, seeded from the on-disk history at app start-up.
    history_state = gr.State(load_history())
    # --- Event wiring ---
    # Enter in the textbox runs one chat turn; chatbot() returns
    # (chat pairs, state, "") so the input box is cleared after each send.
    user_input.submit(chatbot, inputs=[user_input, history_state], outputs=[chatbot_ui, history_state, user_input])
    # Speak the most recent bot reply (last pair's second element).
    hear_button.click(
        lambda latest: text_to_speech(latest[-1][1] if latest else ""),  # Fetch latest bot response
        inputs=[history_state],  # Pass the conversation history
        outputs=audio_output  # Output the file to the audio player
    )
    # Clear history: three independent handlers fire on one click —
    # wipe the JSON file, empty the Chatbot widget, and reset the State.
    clear_button.click(clear_conversation_history, inputs=None, outputs=system_message)
    clear_button.click(lambda: [], outputs=chatbot_ui)  # Clear the chatbot UI
    clear_button.click(lambda: [], outputs=history_state)  # Reset the history state
# Launch the app (blocks until the server stops).
demo.launch()
|