Threatthriver committed (verified)
Commit 30e3c7c · 1 Parent(s): d0347cb

Update app.py

Files changed (1)
  1. app.py +19 -31
app.py CHANGED
@@ -1,7 +1,6 @@
  import gradio as gr
  import os
  import time
- from cerebras.cloud.sdk import Cerebras
  import requests
  from bs4 import BeautifulSoup
  from urllib.parse import urljoin, urlparse
@@ -30,9 +29,8 @@ gemma2-9b-it 30 14,400 15,000 500,000
  llama-3.1-70b-versatile 30 14,400 6,000 200,000
  llama-3.1-8b-instant 30 14,400 20,000 500,000
  llama-3.2-11b-text-preview 30 7,000 7,000 500,000
- llama-3.2-11b-vision-preview 30 7,000 7,000 500,000
- llama-3.2-1b-preview 30 7,000 7,000 500,000
- llama-3.2-3b-preview 30 7,000 7,000 500,000
+ llama-3.2-11b-vision-preview 30 7,000 7,000 500,000
+ llama-3.2-1b-preview 30 7,000 7,000 500,000
  llama-3.2-90b-text-preview 30 7,000 7,000 500,000
  llama-3.2-90b-vision-preview 15 3,500 7,000 250,000
  llama-3.3-70b-specdec 30 1,000 6,000 100,000
@@ -46,24 +44,20 @@ llava-v1.5-7b-4096-preview 30 14,400 30,000 (No limit)
  mixtral-8x7b-32768 30 14,400 5,000 500,000
  """

- SPEECH_TO_TEXT_MODELS_INFO = """
- Speech To Text
+ SPEECH ToText
  ID Requests per Minute Requests per Day Audio Seconds per Hour Audio Seconds per Day
  distil-whisper-large-v3-en 20 2,000 7,200 28,800
  whisper-large-v3 20 2,000 7,200 28,800
  whisper-large-v3-turbo 20 2,000 7,200 28,800
- """

  def get_model_info():
  return f"""
- {CHAT_COMPLETION_MODELS_INFO}
+ {CHAT_COMPLETION_MODELS_INFO}

  {SPEECH_TO_TEXT_MODELS_INFO}
  """

-
  # --- Helper Functions ---
-
  def is_valid_url(url):
  try:
  result = urlparse(url)
@@ -71,37 +65,35 @@ def is_valid_url(url):
  except ValueError:
  return False

-
  def fetch_webpage(url):
  try:
  response = requests.get(url, timeout=10)
- response.raise_for_status() # Raise an exception for bad status codes
+ response.raise_for_status()
  return response.text
  except requests.exceptions.RequestException as e:
  return f"Error fetching URL: {e}"

-
  def extract_text_from_html(html):
- soup = BeautifulSoup(html, 'html.parser')
+ soup = BeautifulSoup(html, 'html.parser')
  text = soup.get_text(separator=' ', strip=True)
  return text

-
  # --- Chat Logic with Groq ---
  async def chat_with_groq(user_input, chat_history):
  start_time = time.time()
  try:
- # Prepare chat history for the prompt
  formatted_history = "\n".join([f"User: {msg[0]}\nAI: {msg[1]}" for msg in chat_history[-10:]])

  messages = [
- {"role": "system", "content": f"""You are IntellijMind, a highly advanced and proactive AI agent. You are designed to assist users in achieving their goals through detailed insights, creative problem-solving, and the use of various tools. Your objective is to understand the user's intentions, break them into logical steps, and use available tools when needed to achieve the best outcome. Available tools: scrape with a URL, and search_internet with a query. Be creative and inject humor when appropriate. You have access to multiple tools to help the user with their requests. Available actions: take_action: 'scrape', parameters: url, take_action: 'search_internet', parameters: query. Example action: Action: take_action, Parameters: {{"action":"scrape", "url":"https://example.com"}} or Action: take_action, Parameters: {{"action":"search_internet", "query":"latest news on AI"}} . Current conversation: {formatted_history}"""},
+ {"role": "system", "content": f"""
+ You are IntellijMind, a highly advanced and proactive AI agent. You are designed to assist users in achieving their goals through detailed insights, creative problem-solving, and the use of various tools. Your objective is to understand the user's intentions, break them into logical steps, and use available tools when needed to achieve the best outcome. Available tools: scrape with a URL, and search_internet with a query. Be creative and inject humor when appropriate. You have access to multiple tools to help the user with their requests. Available actions: take_action: 'scrape', parameters: url, take_action: 'search_internet', parameters: query. Example action: Action: take_action, Parameters: {{"action":"scrape", "url":"https://example.com"}} or Action: take_action, Parameters: {{"action":"search_internet", "query":"latest news on AI"}} . Current conversation: {formatted_history}
+ """},
  {"role": "user", "content": user_input}
  ]

  if user_input.lower() == "model info":
- response = get_model_info()
- return response, "", f"Compute Time: {time.time() - start_time:.2f} seconds", f"Tokens used: {len(user_input.split()) + len(response.split())}"
+ response = get_model_info()
+ return response, "", f"Compute Time: {time.time() - start_time:.2f} seconds", f"Tokens used: {len(user_input.split()) + len(response.split())}"

  completion = client_groq.chat.completions.create(
  model="llama3-groq-70b-8192-tool-use-preview",
@@ -126,7 +118,7 @@ async def chat_with_groq(user_input, chat_history):
  if "Action:" in content:
  action_match = re.search(r"Action: (\w+), Parameters: (\{.*\})", content)
  if action_match and tool_execution_count < 3: # Limit tool use to avoid infinite loops
- tool_execution_count +=1
+ tool_execution_count += 1
  action = action_match.group(1)
  parameters = json.loads(action_match.group(2))
  if action == "take_action":
@@ -136,7 +128,7 @@ async def chat_with_groq(user_input, chat_history):
  html_content = fetch_webpage(url)
  if not html_content.startswith("Error"):
  webpage_text = extract_text_from_html(html_content)
- response += f"\nWebpage Content: {webpage_text}\n"
+ response += f"\nWebpage Content: {webpage_text}\n"
  else:
  response += f"\nError scraping webpage: {html_content}\n"
  else:
@@ -144,8 +136,7 @@ async def chat_with_groq(user_input, chat_history):
  elif parameters.get("action") == "search_internet":
  query = parameters.get("query")
  response += f"\n Search query: {query}. Note: Search is simulated in this environment. Results may vary. \n"
- # Replace the line with a real internet search if you have a search api
- response += f"\n Search Results: Mock Results for query: {query} \n"
+ response += f"\nSearch Results: Mock Results for query: {query} \n"

  compute_time = time.time() - start_time
  token_usage = len(user_input.split()) + len(response.split())
@@ -154,7 +145,6 @@ async def chat_with_groq(user_input, chat_history):
  except Exception as e:
  return "Error: Unable to process your request.", "", str(e), ""

-
  # --- Gradio Interface ---
  def gradio_ui():
  with gr.Blocks() as demo:
@@ -170,7 +160,6 @@ def gradio_ui():

  user_input = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)

-
  with gr.Row():
  send_button = gr.Button("Send", variant="primary")
  clear_button = gr.Button("Clear Chat")
@@ -178,11 +167,10 @@

  async def handle_chat(chat_history, user_input):
  if not user_input.strip():
- return chat_history, "", "", "", "Please enter a valid message."
+ return chat_history, "", "", "", "Please enter a valid message."

  ai_response, chain_of_thought, compute_info, token_usage = await chat_with_groq(user_input, chat_history)

-
  chat_history.append((user_input, ai_response))
  return chat_history, chain_of_thought, compute_info, token_usage

@@ -199,12 +187,12 @@ def gradio_ui():
  return f"Chat history exported to {filename}.", ""

  send_button.click(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
- clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
- export_button.click(export_chat, inputs=[chat_history], outputs=[compute_time, chain_of_thought_display])
+ clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
+ export_button.click(export_chat, inputs=[chat_history], outputs=[compute_time, chain_of_thought_display])

- user_input.submit(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
+ user_input.submit(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])

- gr.Markdown("""---\n### 🌟 Features:\n- **Autonomous Agent**: Proactively pursues your goals.\n- **Advanced Tool Use**: Utilizes multiple tools like web scraping and search.\n- **Dynamic and Creative**: Engages with humor and creative responses.\n- **Enhanced Chat History**: Maintains better context of the conversation.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**: Monitor token usage per response for transparency.\n- **Export Chat History**: Save your conversation as a text file for future reference.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **Submit on Enter**: Seamless interaction with keyboard support.\n""")
+ gr.Markdown("""---\n### 🌟 Features:\n- **Autonomous Agent**: Proactively pursues your goals.\n- **Advanced Tool Use**: Utilizes multiple tools like web scraping and search.\n- **Dynamic and Creative**: Engages with humor and creative responses.\n- **Enhanced Chat History**: Maintains better context of the conversation.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**: Monitor token usage per response for transparency.\n- **Export Chat History**: Save your conversation as a text file for future reference.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **Submit on Enter**: Seamless interaction with keyboard support.\n""")

  return demo

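For context on the tool-use path this commit touches: the system prompt asks the model to emit directives of the form Action: take_action, Parameters: {...}, which app.py parses with a regex, decodes with json.loads, and routes either to the scraper (fetch_webpage plus extract_text_from_html) or to the still-simulated search_internet branch. The following is a minimal, self-contained sketch of that dispatch loop under those assumptions; run_tool_call, ACTION_RE, and the tools_used/max_tools parameters are illustrative names that do not appear in app.py, and the search branch stays mocked exactly as it does in the diff.

import json
import re

import requests
from bs4 import BeautifulSoup

# Directive format the system prompt asks the model to emit, e.g.
# Action: take_action, Parameters: {"action":"scrape", "url":"https://example.com"}
ACTION_RE = re.compile(r"Action: (\w+), Parameters: (\{.*\})")


def fetch_webpage(url):
    # Same contract as app.py: page HTML on success, an "Error ..." string on failure.
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException as e:
        return f"Error fetching URL: {e}"


def extract_text_from_html(html):
    soup = BeautifulSoup(html, "html.parser")
    return soup.get_text(separator=" ", strip=True)


def run_tool_call(content, tools_used=0, max_tools=3):
    """Parse one directive from the model's reply and run it.

    Returns (tool_output, tools_used); unknown or over-limit actions return "".
    """
    match = ACTION_RE.search(content)
    if not match or tools_used >= max_tools:
        return "", tools_used
    action, raw_params = match.group(1), match.group(2)
    if action != "take_action":
        return "", tools_used
    parameters = json.loads(raw_params)
    tools_used += 1
    if parameters.get("action") == "scrape":
        html_content = fetch_webpage(parameters.get("url", ""))
        if html_content.startswith("Error"):
            return f"\nError scraping webpage: {html_content}\n", tools_used
        return f"\nWebpage Content: {extract_text_from_html(html_content)}\n", tools_used
    if parameters.get("action") == "search_internet":
        query = parameters.get("query", "")
        # Still simulated, as in app.py; a real search API call would replace this line.
        return f"\nSearch Results: Mock Results for query: {query} \n", tools_used
    return "", tools_used


if __name__ == "__main__":
    reply = 'Action: take_action, Parameters: {"action":"search_internet", "query":"latest news on AI"}'
    output, used = run_tool_call(reply)
    print(output, used)

Running the example prints the mock search result and the updated tool counter; swapping the mocked branch for a real search API call is the remaining gap that the comment removed in this commit pointed at.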