Mubbashir Ahmed committed
Commit a2dcfac · Parent(s): 725f763

integrating chartgpt

Files changed (2):
  1. README.md +6 -0
  2. app.py +38 -32
README.md CHANGED
@@ -10,3 +10,9 @@ pinned: false
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+Models Used:
+- https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+- https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct
+- https://huggingface.co/MiniMaxAI/MiniMax-M1-80k
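Note: app.py below reaches each of these models through `huggingface_hub.InferenceClient`. A minimal sketch of that call pattern, assuming `HF_TOKEN` is set in the Space secrets and the `together` provider serves the Mixtral checkpoint:

import os
from huggingface_hub import InferenceClient

# Query one of the models listed above; this mirrors the calls app.py makes.
client = InferenceClient(provider="together", api_key=os.environ["HF_TOKEN"])
completion = client.chat.completions.create(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(completion.choices[0].message.content)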
app.py CHANGED
@@ -1,26 +1,26 @@
 import os
 import gradio as gr
 from huggingface_hub import InferenceClient
+from transformers import pipeline
 
-# Setup clients for each provider
+# === Inference Clients ===
 llama_client = InferenceClient(provider="sambanova", api_key=os.environ["HF_TOKEN"])
 minimax_client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
 mistral_client = InferenceClient(provider="together", api_key=os.environ["HF_TOKEN"])
 
+# === ChartGPT pipeline ===
+chart_pipe = pipeline("text2text-generation", model="yuan-tian/chartgpt-llama3")
+
+# === Chat Handler ===
 def chat_with_model(model_choice, prompt, image_url):
     if not prompt:
         return "Please enter a text prompt."
 
     try:
-        # LLaMA 4 supports optional image input
         if model_choice == "LLaMA 4 (SambaNova)":
             content = [{"type": "text", "text": prompt}]
             if image_url:
-                content.append({
-                    "type": "image_url",
-                    "image_url": {"url": image_url}
-                })
-
+                content.append({"type": "image_url", "image_url": {"url": image_url}})
             messages = [{"role": "user", "content": content}]
             completion = llama_client.chat.completions.create(
                 model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
@@ -28,7 +28,6 @@ def chat_with_model(model_choice, prompt, image_url):
             )
             return completion.choices[0].message.content
 
-        # MiniMax: Text only
         elif model_choice == "MiniMax M1 (Novita)":
             messages = [{"role": "user", "content": prompt}]
             completion = minimax_client.chat.completions.create(
@@ -37,7 +36,6 @@
             )
             return completion.choices[0].message.content
 
-        # Mistral: Text only
         elif model_choice == "Mistral Mixtral-8x7B (Together)":
             messages = [{"role": "user", "content": prompt}]
             completion = mistral_client.chat.completions.create(
@@ -48,34 +46,42 @@ def chat_with_model(model_choice, prompt, image_url):
 
         else:
             return "Unsupported model selected."
-
     except Exception as e:
         return f"Error: {e}"
 
-# Gradio UI
-with gr.Blocks() as demo:
-    gr.Markdown("## 🤖 Unified Chatbot Interface")
-    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral.")
+# === ChartGPT Handler ===
+def generate_chart_code(prompt):
+    try:
+        response = chart_pipe(prompt, max_new_tokens=512)[0]["generated_text"]
+        return response
+    except Exception as e:
+        return f"ChartGPT error: {e}"
 
-    model_dropdown = gr.Dropdown(
-        choices=[
-            "LLaMA 4 (SambaNova)",
-            "MiniMax M1 (Novita)",
-            "Mistral Mixtral-8x7B (Together)"
-        ],
-        value="LLaMA 4 (SambaNova)",
-        label="Select Model"
-    )
-    prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
-    image_url_input = gr.Textbox(label="Optional Image URL (for LLaMA only)", placeholder="https://example.com/image.jpg")
+# === Gradio UI ===
+with gr.Blocks() as demo:
+    gr.Markdown("## 🔥 Multi-Tool AI Space: Chat + Chart Generator")
 
-    submit_btn = gr.Button("Generate Response")
-    output_box = gr.Textbox(label="Response", lines=8)
+    with gr.Tabs():
+        with gr.Tab("💬 Multi-Model Chat"):
+            model_dropdown = gr.Dropdown(
+                choices=[
+                    "LLaMA 4 (SambaNova)",
+                    "MiniMax M1 (Novita)",
+                    "Mistral Mixtral-8x7B (Together)"
+                ],
+                value="LLaMA 4 (SambaNova)",
+                label="Select Model"
+            )
+            prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
+            image_url_input = gr.Textbox(label="Optional Image URL (for LLaMA only)", placeholder="https://example.com/image.jpg")
+            submit_btn = gr.Button("Generate Response")
+            output_box = gr.Textbox(label="Response", lines=8)
+            submit_btn.click(chat_with_model, [model_dropdown, prompt_input, image_url_input], output_box)
 
-    submit_btn.click(
-        fn=chat_with_model,
-        inputs=[model_dropdown, prompt_input, image_url_input],
-        outputs=output_box
-    )
+        with gr.Tab("📊 Chart Generator (ChartGPT)"):
+            chart_prompt = gr.Textbox(label="Enter data analysis prompt", placeholder="Generate a bar chart comparing 2023 sales in North America and Europe", lines=3)
+            chart_btn = gr.Button("Generate Chart Code")
+            chart_output = gr.Textbox(label="ChartGPT Code Output", lines=16)
+            chart_btn.click(generate_chart_code, chart_prompt, chart_output)
 
 demo.launch()
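Note on the ChartGPT integration: loading yuan-tian/chartgpt-llama3 via transformers.pipeline downloads and runs the model inside the Space itself, so transformers (plus a torch backend) presumably needs to be added to the Space's requirements.txt, and startup time will grow accordingly. A quick local smoke test of the new handler, assuming the weights fit in memory and the checkpoint really supports the text2text-generation task (if it is a decoder-only LLaMA-style checkpoint, pipeline("text-generation", ...) would be needed instead; the model card is the place to confirm):

from transformers import pipeline

# Build the same pipeline app.py constructs at startup.
# Assumes the checkpoint is a seq2seq model, as the committed code does.
chart_pipe = pipeline("text2text-generation", model="yuan-tian/chartgpt-llama3")

# Exercise the handler logic with a representative prompt.
prompt = "Generate a bar chart comparing 2023 sales in North America and Europe"
result = chart_pipe(prompt, max_new_tokens=512)[0]["generated_text"]
print(result)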