import os
import logging

import gradio as gr
from groq import Groq

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Initialize the Groq client
api_key = os.environ.get("GROQ_API_KEY")
if not api_key:
    logger.error("GROQ_API_KEY environment variable is not set.")
    raise ValueError("GROQ_API_KEY environment variable is required.")

client = Groq(api_key=api_key)
MODEL_NAME = os.environ.get("MODEL_NAME", "llama3-8b-8192")


# Define a function to handle chat completions
def get_completion(user_input):
    try:
        completion = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": user_input},
            ],
            temperature=1,
            max_tokens=1024,
            top_p=1,
            stream=True,
            stop=None,
        )
        # Accumulate the streamed chunks into a single response string
        response = ""
        for chunk in completion:
            response += chunk.choices[0].delta.content or ""
        return response
    except Exception as e:
        logger.error(f"Error during completion: {e}")
        return "Sorry, I encountered an error while processing your request."


# Build and launch the Gradio interface
def launch_interface():
    demo = gr.Interface(
        fn=get_completion,
        inputs=gr.Textbox(
            label="Enter your query:",
            placeholder="Ask me anything...",
            lines=2,
            max_lines=5,
            show_label=True,
            interactive=True,
        ),
        outputs=gr.Textbox(
            label="Response:",
            interactive=False,
            show_label=True,
            lines=6,
            max_lines=10,
        ),
        title="Mr AI",
        description="Ask anything and get a helpful response.",
        theme="default",
        css=".gr-box { border-radius: 10px; border: 1px solid #ccc; padding: 10px; }",
        allow_flagging="never",
    )
    logger.info("Starting Gradio interface")
    demo.launch(share=True)


if __name__ == "__main__":
    launch_interface()