import os

import gradio as gr
from huggingface_hub import InferenceClient

# Set up one client per inference provider, all authenticated with the same HF token
llama_client = InferenceClient(provider="sambanova", api_key=os.environ["HF_TOKEN"])
minimax_client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
mistral_client = InferenceClient(provider="together", api_key=os.environ["HF_TOKEN"])


def chat_with_model(model_choice, prompt, image_url):
    if not prompt:
        return "Please enter a text prompt."

    try:
        # LLaMA 4 supports optional image input: attach the image as an
        # image_url content part alongside the text prompt when one is given
        if model_choice == "LLaMA 4 (SambaNova)":
            content = [{"type": "text", "text": prompt}]
            if image_url:
                content.append({
                    "type": "image_url",
                    "image_url": {"url": image_url},
                })
            messages = [{"role": "user", "content": content}]
            completion = llama_client.chat.completions.create(
                model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
                messages=messages,
            )
            return completion.choices[0].message.content

        # MiniMax: text only
        elif model_choice == "MiniMax M1 (Novita)":
            messages = [{"role": "user", "content": prompt}]
            completion = minimax_client.chat.completions.create(
                model="MiniMaxAI/MiniMax-M1-80k",
                messages=messages,
            )
            return completion.choices[0].message.content

        # Mistral: text only
        elif model_choice == "Mistral Mixtral-8x7B (Together)":
            messages = [{"role": "user", "content": prompt}]
            completion = mistral_client.chat.completions.create(
                model="mistralai/Mixtral-8x7B-Instruct-v0.1",
                messages=messages,
            )
            return completion.choices[0].message.content

        else:
            return "Unsupported model selected."

    except Exception as e:
        return f"Error: {e}"


# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Multi-Model Space")
    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral.")

    model_dropdown = gr.Dropdown(
        choices=[
            "LLaMA 4 (SambaNova)",
            "MiniMax M1 (Novita)",
            "Mistral Mixtral-8x7B (Together)",
        ],
        value="LLaMA 4 (SambaNova)",
        label="Select Model",
    )
    prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
    image_url_input = gr.Textbox(
        label="Optional Image URL (for LLaMA only)",
        placeholder="https://example.com/image.jpg",
    )

    submit_btn = gr.Button("Generate Response")
    output_box = gr.Textbox(label="Response", lines=8)

    submit_btn.click(
        fn=chat_with_model,
        inputs=[model_dropdown, prompt_input, image_url_input],
        outputs=output_box,
    )

demo.launch()
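
# Usage note, as a rough sketch: running this locally assumes HF_TOKEN is set to
# a Hugging Face token with Inference Providers access, and that each provider
# still serves the model IDs above (provider/model listings can change over
# time, so substitute currently listed IDs if a request fails).
#
#   pip install gradio huggingface_hub
#   export HF_TOKEN=hf_...   # placeholder; use your own token
#   python app.py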