Threatthriver committed
Commit 24904a3 · verified · 1 parent: 047994d

Update app.py

Files changed (1): app.py (+6 -9)
app.py CHANGED
@@ -38,7 +38,6 @@ def chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max
             if "Chain of Thought:" in chunk.choices[0].delta.content:
                 chain_of_thought += chunk.choices[0].delta.content.split("Chain of Thought:", 1)[-1]
 
-
         # End compute time measurement
         compute_time = time.time() - start_time
 
@@ -47,7 +46,6 @@ def chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max
         if chain_of_thought:
             formatted_response += f"\n\n**Chain of Thought:**\n{chain_of_thought}"
 
-
         return formatted_response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"
 
     except Exception as e:
@@ -65,43 +63,42 @@ def gradio_ui():
         compute_time = gr.Textbox(label="Compute Time", interactive=False)
         chain_of_thought_display = gr.Textbox(label="Chain of Thought", interactive=False, lines=10)
 
-
         user_input = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)
         send_button = gr.Button("Send", variant="primary")
         clear_button = gr.Button("Clear Chat")
 
-        # Set default values for system prompt, model, etc.
-        default_system_prompt = "You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning. Provide your answers in markdown format. If you do not know the answer, mention that you do not know and don't make things up."
+        # Set default values for system prompt, model, etc.
+        default_system_prompt = """You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning. Provide your answers in markdown format. If you do not know the answer, mention that you do not know and don't make things up. Also, remember to be concise and get straight to the point without unnecessary fluff."""
         default_model = "llama-3.3-70b"
         default_temperature = 0.2
         default_top_p = 1
         default_max_tokens = 1024
 
+
         def handle_chat(chat_history, user_input):
             chat_history.append((user_input, None))
             yield chat_history, "", "Thinking..."
 
             ai_response, chain_of_thought, compute_info = chat_with_cerebras(user_input, default_system_prompt, default_model, default_temperature, default_top_p, default_max_tokens)
-
             chat_history[-1] = (user_input, markdown.markdown(ai_response))  # render markdown output to HTML
             yield chat_history, chain_of_thought, compute_info
 
+
         def clear_chat():
             return [], "", ""
 
-
         send_button.click(
             handle_chat,
             inputs=[chat_history, user_input],
             outputs=[chat_history, chain_of_thought_display, compute_time]
         )
-
         clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time])
 
         gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Powered by IntellijMind Release 1st**: Setting new standards for AI interaction.\n""")
 
-    return demo
+        gr.Markdown("""\n\n## About\nThis project was created by Aniket Kumar as a showcase of AI capabilities with Cerebras. Feel free to explore and share!""")
 
+    return demo
 
 # Run the Gradio app
 demo = gradio_ui()
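For context, the code this commit touches relies on Gradio's generator-handler pattern: handle_chat yields twice, so the chat history shows the user turn with a "Thinking..." status immediately and is updated again once chat_with_cerebras returns. Below is a minimal, self-contained sketch of that pattern only; fake_chat is a hypothetical stand-in for the real Cerebras call, and the only assumption is that gradio is installed.

```python
# Minimal sketch of the generator-handler pattern used in app.py.
# fake_chat() is a placeholder (assumption) for chat_with_cerebras().
import time
import gradio as gr

def fake_chat(message: str):
    """Stand-in for the model call: returns a reply and a compute-time string."""
    start = time.time()
    reply = f"Echo: {message}"
    return reply, f"Compute Time: {time.time() - start:.2f} seconds"

def handle_chat(chat_history, user_input):
    # First yield: show the user turn right away while the model "thinks".
    chat_history.append((user_input, None))
    yield chat_history, "Thinking..."
    # Second yield: fill in the assistant reply and the compute-time metric.
    reply, compute_info = fake_chat(user_input)
    chat_history[-1] = (user_input, reply)
    yield chat_history, compute_info

with gr.Blocks() as demo:
    chat_history = gr.Chatbot()
    compute_time = gr.Textbox(label="Compute Time", interactive=False)
    user_input = gr.Textbox(label="Type your message", lines=2)
    send_button = gr.Button("Send", variant="primary")
    # Each yield from the generator handler streams an update into these outputs.
    send_button.click(handle_chat,
                      inputs=[chat_history, user_input],
                      outputs=[chat_history, compute_time])

if __name__ == "__main__":
    demo.launch()
```

Note also that the commit moves return demo below the new About block, so the extra gr.Markdown section is registered before the Blocks object is handed back to the launcher.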