# ── Imports & API setup ─────────────────────────────────────────────────────
import os

import gradio as gr
import requests

# API key comes from the environment; it is sent as a Bearer token on every
# chat-completion request.
groq_api_key = os.getenv("GROQ_API_KEY")
url = "https://api.groq.com/openai/v1/chat/completions"
headers = {"Authorization": f"Bearer {groq_api_key}"}


# ── Core chat function ──────────────────────────────────────────────────────
def chat_with_groq(user_input):
    """Send *user_input* to the Groq chat-completions API.

    Returns the assistant's reply as a string on success, or a human-readable
    ``"Error: ..."`` string on any failure (network error, non-200 status).
    """
    body = {
        "model": "llama-3.1-8b-instant",
        "messages": [{"role": "user", "content": user_input}],
    }
    try:
        # Timeout keeps the UI from hanging forever if the API stalls;
        # connection failures are reported instead of crashing the app.
        r = requests.post(url, headers=headers, json=body, timeout=60)
    except requests.RequestException as exc:
        return f"Error: {exc}"
    if r.status_code == 200:
        return r.json()["choices"][0]["message"]["content"]
    # The error body is not guaranteed to be JSON; fall back to raw text so
    # the real failure is surfaced rather than a JSON-decode traceback.
    try:
        detail = r.json()
    except ValueError:
        detail = r.text
    return f"Error: {detail}"


# ── UI helpers ──────────────────────────────────────────────────────────────
CSS = """
#header-row {align-items:center; gap:0.75rem;}
#logo {max-width:60px; border-radius:8px;}
#title {font-size:2rem; font-weight:600; margin:0;}
#left-col {width:64%;}
#right-col {width:35%; padding-left:1rem; border:1px solid #e5e5e5; border-radius:8px; padding:1rem;}
"""

LOGO_URL = (
    "https://raw.githubusercontent.com/Decoding-Data-Science/"
    "airesidency/main/dds_logo.jpg"
)

# Suggested prompts surfaced in the UI for quick selection.
LLM_QUESTIONS = [
    "What is Retrieval-Augmented Generation (RAG)?",
    "Explain Chain-of-Thought prompting in simple terms.",
    "How do I fine-tune an LLM on my own data?",
    "What are the security risks of LLM applications?",
    "Compare zero-shot vs few-shot prompting.",
    "What is the role of vector databases with LLMs?",
]

# ── Build the Gradio app ────────────────────────────────────────────────────
with gr.Blocks(css=CSS, title="DDS Chat with Groq AI") as demo:
    # Header row: logo + title
    with gr.Row(elem_id="header-row"):
        gr.Image(value=LOGO_URL, elem_id="logo", show_label=False,
                 show_download_button=False)
        # NOTE(review): the source was truncated mid-call at `gr.Markdown("`.
        # The title text below is reconstructed from the Blocks `title=` kwarg
        # and the `#title` CSS rule — verify against the original file.
        gr.Markdown("# DDS Chat with Groq AI", elem_id="title")