import os
from groq import Groq
import gradio as gr
import logging
os.environ["GROQ_API_KEY"] = "sk-2590665b70c44b99a938762ee4175c58"
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Initialize the Groq client
api_key = os.environ.get("GROQ_API_KEY")
if not api_key:
logger.error("GROQ_API_KEY environment variable is not set.")
raise ValueError("GROQ_API_KEY environment variable is required.")
client = Groq(api_key=api_key)
MODEL_NAME = os.environ.get("MODEL_NAME", "llama3-8b-8192")
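# MODEL_NAME can be overridden via the environment to point at a different Groq-hosted model.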
# Define a function to handle chat completions
def get_completion(user_input):
    if not user_input.strip():
        return "Please enter a valid query."

    # Check if the user asks "Who made you?"
    if "who made you" in user_input.lower():
        return "I was created by Thirumoorthi, a brilliant mind working on AI systems!"

    try:
        completion = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": "You are a friendly and helpful assistant, like ChatGPT."},
                {"role": "user", "content": user_input}
            ],
            temperature=0.7,  # Slightly lower temperature for more controlled responses
            max_tokens=1024,
            top_p=1,
            stream=True,
            stop=None,
        )
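        # Accumulate the streamed delta chunks into a single response string.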
response = ""
for chunk in completion:
response += chunk.choices[0].delta.content or ""
return response.strip() # Clean up response
except Exception as e:
logger.error(f"Error during completion: {e}")
return "Sorry, I encountered an error while processing your request."
# Launch Gradio interface
def launch_interface():
    demo = gr.Interface(
        fn=get_completion,
        inputs=gr.Textbox(
            label="Ask me anything:",
            placeholder="I am here to help! Ask away...",
            lines=2,
            max_lines=5,
            show_label=True,
            interactive=True
        ),
        outputs=gr.Textbox(
            label="Response:",
            interactive=False,
            show_label=True,
            lines=6,
            max_lines=10
        ),
        title="Chat with Mr AI",
        description="I am your friendly assistant, just like ChatGPT! Ask me anything, and I will do my best to help.",
        theme="huggingface",  # More modern theme
        css="""
        .gr-box { border-radius: 15px; border: 1px solid #e1e1e1; padding: 20px; background-color: #f9f9f9; }
        .gr-button { background-color: #4CAF50; color: white; font-size: 14px; }
        .gr-textbox { border-radius: 8px; font-size: 16px; padding: 10px; }
        .gr-output { background-color: #f1f1f1; border-radius: 8px; font-size: 16px; padding: 15px; }
        """,
        allow_flagging="never",
        live=True,  # Enable live updates if supported
    )
    logger.info("Starting Gradio interface")
    demo.launch(share=True)
if __name__ == "__main__":
    launch_interface()