Mubbashir Ahmed committed
Commit 4cd8d7e · 1 Parent(s): fe38860

context-aware code

Files changed (1):
  app.py +45 -28
app.py CHANGED
@@ -7,55 +7,62 @@ llama_client = InferenceClient(provider="sambanova", api_key=os.environ["HF_TOKEN"])
 minimax_client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
 mistral_client = InferenceClient(provider="together", api_key=os.environ["HF_TOKEN"])
 
-def chat_with_model(model_choice, prompt, image_url):
+# Context-aware response function
+def chat_with_model(model_choice, prompt, image_url, chat_history):
     if not prompt:
-        return "Please enter a text prompt."
+        return "Please enter a text prompt.", chat_history
+
+    if chat_history is None:
+        chat_history = []
 
     try:
-        # LLaMA 4 supports optional image input
+        # === LLaMA 4 ===
         if model_choice == "LLaMA 4 (SambaNova)":
-            content = [{"type": "text", "text": prompt}]
+            user_msg = [{"type": "text", "text": prompt}]
             if image_url:
-                content.append({
-                    "type": "image_url",
-                    "image_url": {"url": image_url}
-                })
+                user_msg.append({"type": "image_url", "image_url": {"url": image_url}})
+            chat_history.append({"role": "user", "content": user_msg})
 
-            messages = [{"role": "user", "content": content}]
-            completion = llama_client.chat.completions.create(
+            response = llama_client.chat.completions.create(
                 model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
-                messages=messages
+                messages=chat_history
             )
-            return completion.choices[0].message.content
+            bot_msg = response.choices[0].message.content
+            chat_history.append({"role": "assistant", "content": bot_msg})
+            return bot_msg, chat_history
 
-        # MiniMax: Text only
+        # === MiniMax ===
         elif model_choice == "MiniMax M1 (Novita)":
-            messages = [{"role": "user", "content": prompt}]
-            completion = minimax_client.chat.completions.create(
+            chat_history.append({"role": "user", "content": prompt})
+            response = minimax_client.chat.completions.create(
                 model="MiniMaxAI/MiniMax-M1-80k",
-                messages=messages
+                messages=chat_history
             )
-            return completion.choices[0].message.content
+            bot_msg = response.choices[0].message.content
+            chat_history.append({"role": "assistant", "content": bot_msg})
+            return bot_msg, chat_history
 
-        # Mistral: Text only
+        # === Mistral ===
         elif model_choice == "Mistral Mixtral-8x7B (Together)":
-            messages = [{"role": "user", "content": prompt}]
-            completion = mistral_client.chat.completions.create(
+            chat_history.append({"role": "user", "content": prompt})
+            response = mistral_client.chat.completions.create(
                 model="mistralai/Mixtral-8x7B-Instruct-v0.1",
-                messages=messages
+                messages=chat_history
             )
-            return completion.choices[0].message.content
+            bot_msg = response.choices[0].message.content
+            chat_history.append({"role": "assistant", "content": bot_msg})
+            return bot_msg, chat_history
 
         else:
-            return "Unsupported model selected."
+            return "Unsupported model selected.", chat_history
 
     except Exception as e:
-        return f"Error: {e}"
+        return f"Error: {e}", chat_history
 
 # Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## 🤖 Multi-Model Space")
-    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral.")
+    gr.Markdown("## 🤖 Multi-Model Context-Aware Chatbot")
+    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral. Conversation memory is preserved.")
 
     model_dropdown = gr.Dropdown(
         choices=[
@@ -66,16 +73,26 @@ with gr.Blocks() as demo:
         value="LLaMA 4 (SambaNova)",
         label="Select Model"
     )
+
     prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
     image_url_input = gr.Textbox(label="Optional Image URL (for LLaMA only)", placeholder="https://example.com/image.jpg")
 
     submit_btn = gr.Button("Generate Response")
+    reset_btn = gr.Button("🔄 Reset Conversation")
    output_box = gr.Textbox(label="Response", lines=8)
 
+    state = gr.State([])
+
     submit_btn.click(
         fn=chat_with_model,
-        inputs=[model_dropdown, prompt_input, image_url_input],
-        outputs=output_box
+        inputs=[model_dropdown, prompt_input, image_url_input, state],
+        outputs=[output_box, state]
+    )
+
+    reset_btn.click(
+        fn=lambda: ("Conversation reset. You can start a new one.", []),
+        inputs=[],
+        outputs=[output_box, state]
    )
 
 demo.launch()
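
A quick sketch of what the new chat_history plumbing does across two turns (not part of the commit: the model choice and prompts are illustrative, and it assumes HF_TOKEN is set so the clients in app.py can be constructed and chat_with_model imported):

# Sketch only: two text-only turns against the MiniMax branch of chat_with_model,
# showing how the returned chat_history carries earlier messages into the next call.
history = []  # plays the same role as the gr.State([]) component in the UI

reply_1, history = chat_with_model(
    "MiniMax M1 (Novita)", "Name one planet with rings.", "", history
)
# history now holds the user message plus the assistant reply.

reply_2, history = chat_with_model(
    "MiniMax M1 (Novita)", "How far is it from the Sun?", "", history
)
# The second request sends the full history, so "it" can be resolved from context.

The Reset Conversation button clears the same state by returning an empty list to the gr.State output, so the next submit starts a fresh history.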