Threatthriver committed on
Commit 8476b78 · verified · 1 Parent(s): 8e69136

Update app.py

Files changed (1)
  1. app.py +29 -88
app.py CHANGED
@@ -6,7 +6,6 @@ import requests
 from bs4 import BeautifulSoup
 from urllib.parse import urljoin, urlparse
 from groq import Groq
-import asyncio
 import re
 import json
 
@@ -15,23 +14,12 @@ CEREBRAS_API_KEY = os.getenv("CEREBRAS_API_KEY")
 if not CEREBRAS_API_KEY:
     raise ValueError("CEREBRAS_API_KEY environment variable is not set.")
 
-client_cerebras = Cerebras(api_key=CEREBRAS_API_KEY)
-client_groq = Groq()
-
-# --- Rate Limiting ---
-CEREBRAS_REQUESTS_PER_MINUTE = 30
-CEREBRAS_TOKENS_PER_MINUTE = 6000  # using lowest token limit for versatile model
-GROQ_REQUESTS_PER_MINUTE = 30
-GROQ_TOKENS_PER_MINUTE = 15000  # using token limit for tool-use-preview model
-
+GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+if not GROQ_API_KEY:
+    raise ValueError("GROQ_API_KEY environment variable is not set.")
 
-cerebras_request_queue = asyncio.Queue()
-groq_request_queue = asyncio.Queue()
-
-last_cerebras_request_time = 0
-last_groq_request_time = 0
-cerebras_token_count = 0
-groq_token_count = 0
+client_cerebras = Cerebras(api_key=CEREBRAS_API_KEY)
+client_groq = Groq(api_key=GROQ_API_KEY)
 
 # --- Model Rate Limit Info ---
 CHAT_COMPLETION_MODELS_INFO = """
@@ -98,81 +86,28 @@ def extract_text_from_html(html):
     text = soup.get_text(separator=' ', strip=True)
     return text
 
-# --- Asynchronous Rate Limit Logic ---
-
-async def check_cerebras_rate_limit(num_tokens):
-    global last_cerebras_request_time
-    global cerebras_token_count
-    current_time = time.time()
-    elapsed_time = current_time - last_cerebras_request_time
-
-    if elapsed_time < 60 and cerebras_request_queue.qsize() >= CEREBRAS_REQUESTS_PER_MINUTE:
-        await asyncio.sleep(60 - elapsed_time)
-
-
-    if elapsed_time < 60 and (cerebras_token_count + num_tokens) > CEREBRAS_TOKENS_PER_MINUTE:
-        time_to_wait = 60 - elapsed_time
-        await asyncio.sleep(time_to_wait)
-
-    cerebras_request_queue.put_nowait(current_time)
-    last_cerebras_request_time = time.time()
-    cerebras_token_count = num_tokens if (elapsed_time > 60) else (cerebras_token_count + num_tokens)
-
-async def check_groq_rate_limit(num_tokens):
-    global last_groq_request_time
-    global groq_token_count
-    current_time = time.time()
-    elapsed_time = current_time - last_groq_request_time
-    if elapsed_time < 60 and groq_request_queue.qsize() >= GROQ_REQUESTS_PER_MINUTE:
-        await asyncio.sleep(60 - elapsed_time)
-
-    if elapsed_time < 60 and (groq_token_count + num_tokens) > GROQ_TOKENS_PER_MINUTE:
-        time_to_wait = 60 - elapsed_time
-        await asyncio.sleep(time_to_wait)
-
-    groq_request_queue.put_nowait(current_time)
-    last_groq_request_time = time.time()
-    groq_token_count = num_tokens if (elapsed_time > 60) else (groq_token_count + num_tokens)
-
 
 # --- Chat Logic with Groq ---
 async def chat_with_groq(user_input, chat_history):
     start_time = time.time()
     try:
         # Prepare chat history for the prompt
-        formatted_history = "\n".join([f"User: {msg[0]}\nAI: {msg[1]}" for msg in chat_history[-5:]])
-        # Check for web scraping command
-        if user_input.lower().startswith("scrape"):
-            parts = user_input.split(maxsplit=1)
-            if len(parts) > 1:
-                url = parts[1].strip()
-                if is_valid_url(url):
-                    html_content = fetch_webpage(url)
-                    if not html_content.startswith("Error"):
-                        webpage_text = extract_text_from_html(html_content)
-                        user_input = f"The content from the webpage: {webpage_text}. {user_input}"
-                    else:
-                        user_input = f"{html_content}. {user_input}"
-                else:
-                    user_input = "Invalid URL provided. " + user_input
+        formatted_history = "\n".join([f"User: {msg[0]}\nAI: {msg[1]}" for msg in chat_history[-10:]])
 
         messages = [
-            {"role": "system", "content": f"""You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning. You have access to various tools to help the user, and can initiate actions when needed. Be creative and inject humor when appropriate. You can use tools to browse the web when instructed with a 'scrape' command followed by a URL. If there is a request for model info, use the get_model_info function. Current conversation: {formatted_history} Available actions: take_action: 'scrape', parameters: url. Example action: Action: take_action, Parameters: {{"action":"scrape", "url":"https://example.com"}} """},
-            {"role": "user", "content": user_input}
-        ]
+            {"role": "system", "content": f"""You are IntellijMind, a highly advanced and proactive AI agent. You are designed to assist users in achieving their goals through detailed insights, creative problem-solving, and the use of various tools. Your objective is to understand the user's intentions, break them into logical steps, and use available tools when needed to achieve the best outcome. Available tools: scrape with a URL, and search_internet with a query. Be creative and inject humor when appropriate. You have access to multiple tools to help the user with their requests. Available actions: take_action: 'scrape', parameters: url, take_action: 'search_internet', parameters: query. Example action: Action: take_action, Parameters: {{"action":"scrape", "url":"https://example.com"}} or Action: take_action, Parameters: {{"action":"search_internet", "query":"latest news on AI"}} . Current conversation: {formatted_history}"""},
+            {"role": "user", "content": user_input}
+        ]
+
         if user_input.lower() == "model info":
             response = get_model_info()
             return response, "", f"Compute Time: {time.time() - start_time:.2f} seconds", f"Tokens used: {len(user_input.split()) + len(response.split())}"
 
-
-        num_tokens = len(user_input.split())
-        await check_groq_rate_limit(num_tokens)
-
         completion = client_groq.chat.completions.create(
             model="llama3-groq-70b-8192-tool-use-preview",
             messages=messages,
             temperature=1,
-            max_tokens=1024,
+            max_tokens=2048,
             top_p=1,
             stream=True,
             stop=None,
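
The new system prompt asks the model to emit tool calls as plain text in the form `Action: take_action, Parameters: {...}`, and the streaming loop in the next hunk recovers them with a regular expression. A minimal standalone sketch of that round trip, reusing the regex and the example action string that appear in app.py (the `sample_chunk` variable is illustrative, not part of the commit):

```python
import json
import re

# Example action string in the format the system prompt asks the model to produce.
sample_chunk = 'Action: take_action, Parameters: {"action":"scrape", "url":"https://example.com"}'

# Same pattern used in app.py's streaming loop.
match = re.search(r"Action: (\w+), Parameters: (\{.*\})", sample_chunk)
if match:
    action = match.group(1)                  # "take_action"
    parameters = json.loads(match.group(2))  # {"action": "scrape", "url": "https://example.com"}
    print(action, parameters["action"], parameters["url"])
```

Note that in the diff the pattern is applied to each streamed chunk individually, so a match requires the full action string to arrive within a single chunk.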
@@ -180,31 +115,37 @@ async def chat_with_groq(user_input, chat_history):
 
         response = ""
         chain_of_thought = ""
+        tool_execution_count = 0
         for chunk in completion:
             if chunk.choices[0].delta and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
                 response += content
                 if "Chain of Thought:" in content:
                     chain_of_thought += content.split("Chain of Thought:", 1)[-1]
-                # Check if action needs to be taken
+
                 if "Action:" in content:
                     action_match = re.search(r"Action: (\w+), Parameters: (\{.*\})", content)
-                    if action_match:
+                    if action_match and tool_execution_count < 3:  # Limit tool use to avoid infinite loops
+                        tool_execution_count += 1
                         action = action_match.group(1)
                         parameters = json.loads(action_match.group(2))
                         if action == "take_action":
-                            if parameters.get("action") == "scrape":
+                            if parameters.get("action") == "scrape":
                                 url = parameters.get("url")
                                 if is_valid_url(url):
-                                    html_content = fetch_webpage(url)
-                                    if not html_content.startswith("Error"):
-                                        webpage_text = extract_text_from_html(html_content)
-                                        response += f"\nWebpage Content: {webpage_text}\n"
-                                    else:
-                                        response += f"\nError scraping webpage: {html_content}\n"
+                                    html_content = fetch_webpage(url)
+                                    if not html_content.startswith("Error"):
+                                        webpage_text = extract_text_from_html(html_content)
+                                        response += f"\nWebpage Content: {webpage_text}\n"
+                                    else:
+                                        response += f"\nError scraping webpage: {html_content}\n"
                                 else:
                                     response += "\nInvalid URL provided.\n"
-
+                            elif parameters.get("action") == "search_internet":
+                                query = parameters.get("query")
+                                response += f"\n Search query: {query}. Note: Search is simulated in this environment. Results may vary. \n"
+                                # Replace the line with a real internet search if you have a search api
+                                response += f"\n Search Results: Mock Results for query: {query} \n"
 
         compute_time = time.time() - start_time
         token_usage = len(user_input.split()) + len(response.split())
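
The `search_internet` branch above is explicitly a placeholder ("Replace the line with a real internet search if you have a search api"). A hedged sketch of what such a hook could look like, using the `requests` dependency app.py already imports; the endpoint, query parameters, response shape, and `SEARCH_API_KEY` variable are hypothetical placeholders for whichever search provider is actually used:

```python
import os
import requests

def search_internet(query: str, max_results: int = 5) -> str:
    """Return a short plain-text list of search hits for `query` (hypothetical provider)."""
    api_key = os.getenv("SEARCH_API_KEY")  # hypothetical env var, analogous to the existing *_API_KEY checks
    if not api_key:
        # Fall back to the same simulated behaviour the commit ships with.
        return f"Search Results: Mock Results for query: {query}"
    resp = requests.get(
        "https://api.example-search.com/v1/search",  # hypothetical endpoint, not a real service
        params={"q": query, "count": max_results},   # assumed parameter names
        headers={"Authorization": f"Bearer {api_key}"},
        timeout=10,
    )
    resp.raise_for_status()
    hits = resp.json().get("results", [])            # assumed response shape
    return "\n".join(f"- {hit.get('title', '')}: {hit.get('url', '')}" for hit in hits)
```

With such a helper in place, the `elif parameters.get("action") == "search_internet":` branch could append `search_internet(query)` to `response` instead of the mock string.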
@@ -217,7 +158,7 @@ async def chat_with_groq(user_input, chat_history):
 # --- Gradio Interface ---
 def gradio_ui():
     with gr.Blocks() as demo:
-        gr.Markdown("""# 🚀 IntellijMind: The Crazy Agent Chatbot\nExperience the most advanced chatbot for deep insights, chain-of-thought reasoning, and unmatched clarity! Get ready for some proactive action!""")
+        gr.Markdown("""# 🚀 IntellijMind: The Autonomous AI Agent\nExperience the forefront of AI capabilities, where the agent proactively achieves your goals!""")
 
         with gr.Row():
             with gr.Column(scale=6):
@@ -263,7 +204,7 @@ def gradio_ui():
 
         user_input.submit(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
 
-        gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Proactive Actions**: The agent will take actions without being explicitly asked.\n- **Web Scraping**: The agent will use the scrape command if needed\n- **Humor and Creativity**: Enjoy a more engaging and creative experience.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**: Monitor token usage per response for transparency.\n- **Export Chat History**: Save your conversation as a text file for future reference.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **Submit on Enter**: Seamless interaction with keyboard support.\n""")
+        gr.Markdown("""---\n### 🌟 Features:\n- **Autonomous Agent**: Proactively pursues your goals.\n- **Advanced Tool Use**: Utilizes multiple tools like web scraping and search.\n- **Dynamic and Creative**: Engages with humor and creative responses.\n- **Enhanced Chat History**: Maintains better context of the conversation.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**: Monitor token usage per response for transparency.\n- **Export Chat History**: Save your conversation as a text file for future reference.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **Submit on Enter**: Seamless interaction with keyboard support.\n""")
 
     return demo
 
 