Mubbashir Ahmed committed
Commit dc55189 · 1 Parent(s): 4cd8d7e

multi round dialoging

Files changed (2)
  1. .gitignore +1 -1
  2. app.py +25 -11
.gitignore CHANGED
@@ -1 +1 @@
-BACKUP.py
+CODE*
app.py CHANGED
@@ -2,12 +2,28 @@ import os
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# Setup clients for each provider
+# Clients for each model provider
 llama_client = InferenceClient(provider="sambanova", api_key=os.environ["HF_TOKEN"])
 minimax_client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
 mistral_client = InferenceClient(provider="together", api_key=os.environ["HF_TOKEN"])
 
-# Context-aware response function
+# Format chat history for Markdown display
+def format_chat_history(chat_history):
+    formatted = ""
+    for msg in chat_history:
+        role = msg["role"]
+        content = msg["content"]
+        if isinstance(content, list):  # For LLaMA image+text input
+            for item in content:
+                if "text" in item:
+                    formatted += f"**{role.capitalize()}:** {item['text']}\n\n"
+                elif "image_url" in item:
+                    formatted += f"**{role.capitalize()}:** 🖼️ Image: {item['image_url']['url']}\n\n"
+        else:
+            formatted += f"**{role.capitalize()}:** {content}\n\n"
+    return formatted.strip()
+
+# Main chat handler
 def chat_with_model(model_choice, prompt, image_url, chat_history):
     if not prompt:
         return "Please enter a text prompt.", chat_history
@@ -29,7 +45,6 @@ def chat_with_model(model_choice, prompt, image_url, chat_history):
             )
             bot_msg = response.choices[0].message.content
             chat_history.append({"role": "assistant", "content": bot_msg})
-            return bot_msg, chat_history
 
         # === MiniMax ===
         elif model_choice == "MiniMax M1 (Novita)":
@@ -40,7 +55,6 @@ def chat_with_model(model_choice, prompt, image_url, chat_history):
             )
             bot_msg = response.choices[0].message.content
             chat_history.append({"role": "assistant", "content": bot_msg})
-            return bot_msg, chat_history
 
         # === Mistral ===
         elif model_choice == "Mistral Mixtral-8x7B (Together)":
@@ -51,18 +65,19 @@ def chat_with_model(model_choice, prompt, image_url, chat_history):
             )
             bot_msg = response.choices[0].message.content
             chat_history.append({"role": "assistant", "content": bot_msg})
-            return bot_msg, chat_history
 
         else:
             return "Unsupported model selected.", chat_history
 
+        return format_chat_history(chat_history), chat_history
+
     except Exception as e:
         return f"Error: {e}", chat_history
 
-# Gradio UI
+# Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("## 🤖 Multi-Model Context-Aware Chatbot")
-    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral. Conversation memory is preserved.")
+    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral. Memory is preserved for multi-turn dialog.")
 
     model_dropdown = gr.Dropdown(
         choices=[
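The effect of dropping the per-branch returns is visible above: each model branch now only appends the assistant reply to `chat_history`, and a single return after the `if/elif` chain hands back the rendered Markdown transcript plus the updated history for the next turn. A minimal sketch of that multi-turn loop follows (no network calls; `fake_chat` is a hypothetical, simplified stand-in for `chat_with_model`, and it assumes `format_chat_history` from the first hunk is in scope):

```python
# Stub handler: same append-then-return-state pattern as chat_with_model,
# with the provider call replaced by a canned reply.
def fake_chat(prompt, chat_history):
    chat_history.append({"role": "user", "content": prompt})
    bot_msg = f"Echo: {prompt}"  # stand-in for response.choices[0].message.content
    chat_history.append({"role": "assistant", "content": bot_msg})
    return format_chat_history(chat_history), chat_history

state = []  # what gr.State([]) carries between clicks
for prompt in ["Hello!", "And what did I just say?"]:
    rendered, state = fake_chat(prompt, state)

print(len(state))  # 4 -> two user turns plus two assistant replies
print(rendered)    # full Markdown transcript, not just the latest reply
```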
@@ -77,10 +92,9 @@ with gr.Blocks() as demo:
     prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
     image_url_input = gr.Textbox(label="Optional Image URL (for LLaMA only)", placeholder="https://example.com/image.jpg")
 
-    submit_btn = gr.Button("Generate Response")
+    submit_btn = gr.Button("💬 Generate Response")
     reset_btn = gr.Button("🔄 Reset Conversation")
-    output_box = gr.Textbox(label="Response", lines=8)
-
+    output_box = gr.Markdown(label="Chat History", value="")
     state = gr.State([])
 
     submit_btn.click(
@@ -90,7 +104,7 @@ with gr.Blocks() as demo:
     )
 
     reset_btn.click(
-        fn=lambda: ("Conversation reset. You can start a new one.", []),
+        fn=lambda: ("🧹 Conversation reset. You can start a new one.", []),
         inputs=[],
         outputs=[output_box, state]
     )
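The `submit_btn.click(...)` arguments fall between the hunks shown above, so the exact input/output wiring is not visible in this diff. As a pattern illustration only (not the app's actual code), here is a minimal self-contained Gradio sketch of the same idea: a `gr.Markdown` output plus `gr.State` carrying the history, and a reset lambda that returns one value per declared output, mirroring the `reset_btn.click` hunk. The handler `echo_chat`, the component names, and the `inputs=` list are all hypothetical.

```python
import gradio as gr

# Stub handler standing in for chat_with_model: returns the rendered
# transcript for the Markdown box and the updated history for gr.State.
def echo_chat(prompt, chat_history):
    chat_history.append({"role": "user", "content": prompt})
    chat_history.append({"role": "assistant", "content": f"Echo: {prompt}"})
    rendered = "\n\n".join(
        f"**{m['role'].capitalize()}:** {m['content']}" for m in chat_history
    )
    return rendered, chat_history

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Text Prompt")
    submit_btn = gr.Button("💬 Generate Response")
    reset_btn = gr.Button("🔄 Reset Conversation")
    output_box = gr.Markdown(value="")
    state = gr.State([])

    # One value per output: rendered transcript -> output_box, history -> state.
    submit_btn.click(fn=echo_chat, inputs=[prompt_input, state], outputs=[output_box, state])
    reset_btn.click(fn=lambda: ("🧹 Conversation reset.", []), inputs=[], outputs=[output_box, state])

if __name__ == "__main__":
    demo.launch()
```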