Mubbashir Ahmed committed
Commit 016c388 · Parent: 042d9eb

adding minimax model

Files changed (1): app.py (+44, -30)
app.py CHANGED
@@ -2,50 +2,64 @@ import os
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# Authenticate with Hugging Face
-client = InferenceClient(
-    provider="sambanova",
-    api_key=os.environ["HF_TOKEN"],
-)
+# Clients for both providers
+llama_client = InferenceClient(provider="sambanova", api_key=os.environ["HF_TOKEN"])
+minimax_client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
 
-def llama4_chat(image_url, text_prompt):
-    if not text_prompt:
-        return "Please enter a text question or prompt."
-
-    message_content = [{"type": "text", "text": text_prompt}]
-
-    if image_url:
-        message_content.append({
-            "type": "image_url",
-            "image_url": {"url": image_url}
-        })
-
-    messages = [{"role": "user", "content": message_content}]
-
-    try:
-        completion = client.chat.completions.create(
-            model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
-            messages=messages
-        )
-        return completion.choices[0].message.content
+def chat_with_model(model_choice, prompt, image_url):
+    if not prompt:
+        return "Please enter a text prompt."
+
+    try:
+        if model_choice == "LLaMA 4 (SambaNova)":
+            # Prepare message with optional image
+            content = [{"type": "text", "text": prompt}]
+            if image_url:
+                content.append({
+                    "type": "image_url",
+                    "image_url": {"url": image_url}
+                })
+
+            messages = [{"role": "user", "content": content}]
+            completion = llama_client.chat.completions.create(
+                model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
+                messages=messages
+            )
+            return completion.choices[0].message.content
+
+        elif model_choice == "MiniMax M1 (Novita)":
+            messages = [{"role": "user", "content": prompt}]
+            completion = minimax_client.chat.completions.create(
+                model="MiniMaxAI/MiniMax-M1-80k",
+                messages=messages
+            )
+            return completion.choices[0].message.content
+
+        else:
+            return "Unsupported model selected."
+
     except Exception as e:
         return f"Error: {e}"
 
 # Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## 🦙 LLaMA 4 Chat (Text + Optional Image URL)")
-    gr.Markdown("Enter a text prompt. Optionally provide an image URL.")
-
-    with gr.Row():
-        text_prompt_input = gr.Textbox(label="Text Prompt / Question", placeholder="What is happening in the image?", lines=2)
-        image_url_input = gr.Textbox(label="Optional Image URL", placeholder="https://example.com/image.jpg")
+    gr.Markdown("## 🤖 Multimodel Chatbot: LLaMA 4 & MiniMax M1")
+    gr.Markdown("Choose a model, enter your prompt, and optionally add an image URL for LLaMA.")
 
+    model_dropdown = gr.Dropdown(
+        choices=["LLaMA 4 (SambaNova)", "MiniMax M1 (Novita)"],
+        value="LLaMA 4 (SambaNova)",
+        label="Select Model"
+    )
+    prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
+    image_url_input = gr.Textbox(label="Optional Image URL (for LLaMA only)", placeholder="https://example.com/image.jpg")
+
     submit_btn = gr.Button("Generate Response")
-    output_box = gr.Textbox(label="LLaMA 4 Response", lines=8)
+    output_box = gr.Textbox(label="Response", lines=8)
 
     submit_btn.click(
-        fn=llama4_chat,
-        inputs=[image_url_input, text_prompt_input],
+        fn=chat_with_model,
+        inputs=[model_dropdown, prompt_input, image_url_input],
         outputs=output_box
     )
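
For reference, the MiniMax branch added in this commit is text-only and routes through the Novita provider. A minimal standalone sketch of the same call, assuming HF_TOKEN is exported and the token has access to the novita provider (the prompt string is illustrative):

import os
from huggingface_hub import InferenceClient

# Same call shape as the MiniMax branch in the diff above; the provider
# string, model id, and HF_TOKEN usage are taken from the commit.
client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
completion = client.chat.completions.create(
    model="MiniMaxAI/MiniMax-M1-80k",
    messages=[{"role": "user", "content": "Introduce yourself in one sentence."}],
)
print(completion.choices[0].message.content)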
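
The new dispatcher can also be smoke-tested without the UI. A hypothetical check, assuming app.py is importable and HF_TOKEN is set; importing app builds the Blocks layout but, in the visible diff at least, never calls demo.launch():

from app import chat_with_model

# Text-only prompt, routed to MiniMax M1 via the novita client
print(chat_with_model("MiniMax M1 (Novita)", "What is MiniMax M1?", ""))

# Text plus image prompt, routed to LLaMA 4 via the sambanova client
print(chat_with_model(
    "LLaMA 4 (SambaNova)",
    "What is happening in the image?",
    "https://example.com/image.jpg",  # placeholder URL from the UI hint text
))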