Thomas (Tom) Gardos committed
Commit: 558adb3
Parents: aa1b9ca 669108f

Merge pull request #96 from DL4DS/cleanup

removing debug print statements and minor fix

apps/ai_tutor/chainlit_app.py CHANGED
@@ -13,7 +13,7 @@ from modules.chat.helpers import (
     get_sources,
     get_history_chat_resume,
     get_history_setup_llm,
-    get_last_config,
+    # get_last_config,
 )
 from modules.chat_processor.helpers import (
     update_user_info,
@@ -27,7 +27,6 @@ from helpers import get_time
 import copy
 from typing import Optional
 from chainlit.types import ThreadDict
-import time
 import base64
 from langchain_community.callbacks import get_openai_callback
 from datetime import datetime, timezone
@@ -90,7 +89,6 @@ class Chatbot:
 
         #TODO: Clean this up.
         """
-        start_time = time.time()
 
         llm_settings = cl.user_session.get("llm_settings", {})
         (
@@ -138,8 +136,6 @@ class Chatbot:
         cl.user_session.set("chain", self.chain)
         cl.user_session.set("llm_tutor", self.llm_tutor)
 
-        print("Time taken to setup LLM: ", time.time() - start_time)
-
     @no_type_check
     async def update_llm(self, new_settings: Dict[str, Any]):
         """
@@ -222,32 +218,9 @@ class Chatbot:
         """
         Inform the user about the updated LLM settings and display them as a message.
         """
-        llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
-        llm_tutor = cl.user_session.get("llm_tutor")
-        settings_dict = {
-            "model": llm_settings.get("chat_model"),
-            "retriever": llm_settings.get("retriever_method"),
-            "memory_window": llm_settings.get("memory_window"),
-            "num_docs_in_db": (
-                len(llm_tutor.vector_db)
-                if llm_tutor and hasattr(llm_tutor, "vector_db")
-                else 0
-            ),
-            "view_sources": llm_settings.get("view_sources"),
-            "follow_up_questions": llm_settings.get("follow_up_questions"),
-        }
-        print("Settings Dict: ", settings_dict)
         await cl.Message(
             author=SYSTEM,
             content="LLM settings have been updated. You can continue with your Query!",
-            # elements=[
-            #     cl.Text(
-            #         name="settings",
-            #         display="side",
-            #         content=json.dumps(settings_dict, indent=4),
-            #         language="json",
-            #     ),
-            # ],
         ).send()
 
     async def set_starters(self):
@@ -306,8 +279,6 @@ class Chatbot:
         and display and load previous conversation if chat logging is enabled.
         """
 
-        start_time = time.time()
-
         await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
 
         user = cl.user_session.get("user")
@@ -335,8 +306,6 @@ class Chatbot:
         cl.user_session.set("llm_tutor", self.llm_tutor)
         cl.user_session.set("chain", self.chain)
 
-        print("Time taken to start LLM: ", time.time() - start_time)
-
     async def stream_response(self, response):
         """
         Stream the response from the LLM.
@@ -367,8 +336,6 @@ class Chatbot:
             message: The incoming chat message.
         """
 
-        start_time = time.time()
-
         chain = cl.user_session.get("chain")
         token_count = 0 # initialize token count
         if not chain:
@@ -386,8 +353,6 @@ class Chatbot:
             user.metadata = updated_user.metadata
             cl.user_session.set("user", user)
 
-        print("\n\n User Tokens Left: ", user.metadata["tokens_left"])
-
         # see if user has token credits left
         # if not, return message saying they have run out of tokens
         if user.metadata["tokens_left"] <= 0 and "admin" not in user.metadata["role"]:
@@ -478,12 +443,9 @@ class Chatbot:
         )
         answer_with_sources = answer_with_sources.replace("$$", "$")
 
-        print("Time taken to process the message: ", time.time() - start_time)
-
         actions = []
 
         if self.config["llm_params"]["generate_follow_up"]:
-            start_time = time.time()
             cb_follow_up = cl.AsyncLangchainCallbackHandler()
             config = {
                 "callbacks": (
@@ -513,8 +475,6 @@ class Chatbot:
             )
         )
 
-        print("Time taken to generate questions: ", time.time() - start_time)
-
         # # update user info with token count
         tokens_left = await update_user_from_chainlit(user, token_count)
 
@@ -532,21 +492,20 @@ class Chatbot:
         ).send()
 
     async def on_chat_resume(self, thread: ThreadDict):
-        thread_config = None
+        # thread_config = None
         steps = thread["steps"]
         k = self.config["llm_params"][
             "memory_window"
         ] # on resume, alwyas use the default memory window
         conversation_list = get_history_chat_resume(steps, k, SYSTEM, LLM)
-        thread_config = get_last_config(
-            steps
-        ) # TODO: Returns None for now - which causes config to be reloaded with default values
+        # thread_config = get_last_config(
+        #     steps
+        # ) # TODO: Returns None for now - which causes config to be reloaded with default values
         cl.user_session.set("memory", conversation_list)
-        await self.start(config=thread_config)
+        await self.start()
 
 @cl.header_auth_callback
 def header_auth_callback(headers: dict) -> Optional[cl.User]:
-    print("\n\n\nI am here\n\n\n")
     # try: # TODO: Add try-except block after testing
     # TODO: Implement to get the user information from the headers (not the cookie)
     cookie = headers.get("cookie") # gets back a str
@@ -562,10 +521,6 @@ class Chatbot:
     ).decode()
     decoded_user_info = json.loads(decoded_user_info)
 
-    print(
-        f"\n\n USER ROLE: {decoded_user_info['literalai_info']['metadata']['role']} \n\n"
-    )
-
    return cl.User(
        id=decoded_user_info["literalai_info"]["id"],
        identifier=decoded_user_info["literalai_info"]["identifier"],
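
A note on the auth path touched by the last two hunks: with its debug prints gone, header_auth_callback still base64-decodes a cookie payload and parses it as JSON before building the cl.User. A minimal sketch of that decode round trip, assuming a JSON-then-base64 payload; the sample payload and its field values below are illustrative, not taken from this commit:

import base64
import json

# Illustrative payload mirroring the fields the callback reads; the real
# cookie is produced elsewhere in the app.
user_info = {
    "literalai_info": {
        "id": "user-123",
        "identifier": "student@example.edu",
        "metadata": {"role": ["student"], "tokens_left": 1000},
    }
}

# Encode the way the callback appears to expect: JSON -> base64 string.
encoded = base64.b64encode(json.dumps(user_info).encode()).decode()

# Decode the same way header_auth_callback does once it has the cookie value:
# base64 -> bytes -> str -> dict.
decoded_user_info = json.loads(base64.b64decode(encoded).decode())
assert decoded_user_info["literalai_info"]["identifier"] == "student@example.edu"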
 
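Since the ad-hoc start_time/print timing in setup_llm, start, and main_message is dropped rather than replaced, one way to keep those measurements without printing would be a logger-backed context manager. A minimal sketch under that assumption; the timed helper below is illustrative, not part of this commit:

import logging
import time
from contextlib import contextmanager

logger = logging.getLogger(__name__)


@contextmanager
def timed(label: str):
    # Log elapsed wall-clock time at DEBUG level so timings remain
    # available in development without printing in production.
    start = time.perf_counter()
    try:
        yield
    finally:
        logger.debug("%s took %.3f s", label, time.perf_counter() - start)


# Usage, e.g. inside setup_llm:
#     with timed("setup LLM"):
#         ...  # build the chain and set session state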
apps/ai_tutor/helpers.py CHANGED
@@ -32,9 +32,6 @@ async def check_user_cooldown(
     cooldown_end_time = last_message_time + timedelta(seconds=COOLDOWN_TIME)
     cooldown_end_time_iso = cooldown_end_time.isoformat()
 
-    # Debug: Print the cooldown end time
-    print(f"Cooldown end time (ISO): {cooldown_end_time_iso}")
-
     # Check if the user is still in cooldown
     if elapsed_time_in_seconds < COOLDOWN_TIME:
         return True, cooldown_end_time_iso # Return in ISO 8601 format
@@ -81,10 +78,6 @@ async def reset_tokens_for_user(user_info, TOKENS_LEFT, REGEN_TIME):
     # Ensure the new token count does not exceed max_tokens
     new_token_count = min(current_tokens + tokens_to_regenerate, max_tokens)
 
-    print(
-        f"\n\n Adding {tokens_to_regenerate} tokens to the user, Time elapsed: {elapsed_time_in_seconds} seconds, Tokens after regeneration: {new_token_count}, Tokens before: {current_tokens} \n\n"
-    )
-
     # Update the user's token count
     user_info["metadata"]["tokens_left"] = new_token_count
 
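The surviving cooldown logic is unchanged by the removed debug print: the window ends at last_message_time plus COOLDOWN_TIME, and the end time is returned in ISO 8601. A small worked example of that arithmetic; the constant and timestamps are illustrative:

from datetime import datetime, timedelta, timezone

COOLDOWN_TIME = 60  # seconds (illustrative value)

last_message_time = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
now = datetime(2024, 1, 1, 12, 0, 45, tzinfo=timezone.utc)

elapsed_time_in_seconds = (now - last_message_time).total_seconds()
cooldown_end_time = last_message_time + timedelta(seconds=COOLDOWN_TIME)
cooldown_end_time_iso = cooldown_end_time.isoformat()

# 45 s elapsed < 60 s cooldown, so the user is still in cooldown until
# 2024-01-01T12:01:00+00:00.
assert elapsed_time_in_seconds < COOLDOWN_TIME
assert cooldown_end_time_iso == "2024-01-01T12:01:00+00:00"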
 
 
apps/chainlit_base/chainlit_base.py CHANGED
@@ -8,7 +8,6 @@ from modules.chat.helpers import (
     get_history_setup_llm,
 )
 import copy
-import time
 from langchain_community.callbacks import get_openai_callback
 from config.config_manager import config_manager
 
@@ -36,7 +35,6 @@ class Chatbot:
 
         #TODO: Clean this up.
         """
-        start_time = time.time()
 
         llm_settings = cl.user_session.get("llm_settings", {})
         (
@@ -84,8 +82,6 @@ class Chatbot:
         cl.user_session.set("chain", self.chain)
         cl.user_session.set("llm_tutor", self.llm_tutor)
 
-        print("Time taken to setup LLM: ", time.time() - start_time)
-
     @no_type_check
     async def update_llm(self, new_settings: Dict[str, Any]):
         """
@@ -168,32 +164,9 @@ class Chatbot:
         """
         Inform the user about the updated LLM settings and display them as a message.
         """
-        llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
-        llm_tutor = cl.user_session.get("llm_tutor")
-        settings_dict = {
-            "model": llm_settings.get("chat_model"),
-            "retriever": llm_settings.get("retriever_method"),
-            "memory_window": llm_settings.get("memory_window"),
-            "num_docs_in_db": (
-                len(llm_tutor.vector_db)
-                if llm_tutor and hasattr(llm_tutor, "vector_db")
-                else 0
-            ),
-            "view_sources": llm_settings.get("view_sources"),
-            "follow_up_questions": llm_settings.get("follow_up_questions"),
-        }
-        print("Settings Dict: ", settings_dict)
         await cl.Message(
             author=SYSTEM,
             content="LLM settings have been updated. You can continue with your Query!",
-            # elements=[
-            #     cl.Text(
-            #         name="settings",
-            #         display="side",
-            #         content=json.dumps(settings_dict, indent=4),
-            #         language="json",
-            #     ),
-            # ],
         ).send()
 
     async def set_starters(self):
@@ -243,8 +216,6 @@ class Chatbot:
         and display and load previous conversation if chat logging is enabled.
         """
 
-        start_time = time.time()
-
         await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
 
         # TODO: remove self.user with cl.user_session.get("user")
@@ -263,8 +234,6 @@ class Chatbot:
         cl.user_session.set("llm_tutor", self.llm_tutor)
         cl.user_session.set("chain", self.chain)
 
-        print("Time taken to start LLM: ", time.time() - start_time)
-
     async def stream_response(self, response):
         """
         Stream the response from the LLM.
@@ -295,8 +264,6 @@ class Chatbot:
             message: The incoming chat message.
         """
 
-        start_time = time.time()
-
         chain = cl.user_session.get("chain")
         token_count = 0 # initialize token count
         if not chain:
@@ -342,12 +309,9 @@ class Chatbot:
         )
         answer_with_sources = answer_with_sources.replace("$$", "$")
 
-        print("Time taken to process the message: ", time.time() - start_time)
-
         actions = []
 
         if self.config["llm_params"]["generate_follow_up"]:
-            start_time = time.time()
             cb_follow_up = cl.AsyncLangchainCallbackHandler()
             config = {
                 "callbacks": (
@@ -377,9 +341,6 @@ class Chatbot:
             )
         )
 
-        print("Time taken to generate questions: ", time.time() - start_time)
-        print("Total Tokens Used: ", token_count)
-
         await cl.Message(
             content=answer_with_sources,
             elements=source_elements,
 
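With the "Total Tokens Used" print gone, token usage is still observable through the get_openai_callback import retained at the top of the file. A minimal sketch of that pattern, assuming the chain is a LangChain runnable; the wrapper function below is illustrative, not part of this commit:

from langchain_community.callbacks import get_openai_callback


async def invoke_with_token_count(chain, inputs, config):
    # get_openai_callback accumulates OpenAI token usage for everything
    # executed inside the with-block.
    with get_openai_callback() as cb:
        result = await chain.ainvoke(inputs, config)
    # cb.total_tokens can then be folded into the session's token_count
    # instead of being printed.
    return result, cb.total_tokens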
modules/chat/helpers.py CHANGED
@@ -137,31 +137,59 @@ def get_history_chat_resume(steps, k, SYSTEM, LLM):
 
 def get_history_setup_llm(memory_list):
     conversation_list = []
-    for message in memory_list:
-        message_dict = message.to_dict() if hasattr(message, "to_dict") else message
-
-        # Check if the type attribute is present as a key or attribute
-        message_type = (
-            message_dict.get("type", None)
-            if isinstance(message_dict, dict)
-            else getattr(message, "type", None)
+    i = 0
+    while i < len(memory_list) - 1:
+        # Process the current and next message
+        current_message = memory_list[i]
+        next_message = memory_list[i + 1]
+
+        # Convert messages to dictionary if necessary
+        current_message_dict = (
+            current_message.to_dict()
+            if hasattr(current_message, "to_dict")
+            else current_message
+        )
+        next_message_dict = (
+            next_message.to_dict() if hasattr(next_message, "to_dict") else next_message
         )
 
-        # Check if content is present as a key or attribute
-        message_content = (
-            message_dict.get("content", None)
-            if isinstance(message_dict, dict)
-            else getattr(message, "content", None)
+        # Check message type and content for current and next message
+        current_message_type = (
+            current_message_dict.get("type", None)
+            if isinstance(current_message_dict, dict)
+            else getattr(current_message, "type", None)
+        )
+        current_message_content = (
+            current_message_dict.get("content", None)
+            if isinstance(current_message_dict, dict)
+            else getattr(current_message, "content", None)
         )
 
-        if message_type in ["ai", "ai_message"]:
-            conversation_list.append({"type": "ai_message", "content": message_content})
-        elif message_type in ["human", "user_message"]:
+        next_message_type = (
+            next_message_dict.get("type", None)
+            if isinstance(next_message_dict, dict)
+            else getattr(next_message, "type", None)
+        )
+        next_message_content = (
+            next_message_dict.get("content", None)
+            if isinstance(next_message_dict, dict)
+            else getattr(next_message, "content", None)
+        )
+
+        # Check if the current message is user message and the next one is AI message
+        if current_message_type in ["human", "user_message"] and next_message_type in [
+            "ai",
+            "ai_message",
+        ]:
+            conversation_list.append(
+                {"type": "user_message", "content": current_message_content}
+            )
             conversation_list.append(
-                {"type": "user_message", "content": message_content}
+                {"type": "ai_message", "content": next_message_content}
             )
+            i += 2 # Skip the next message since it has been paired
         else:
-            raise ValueError("Invalid message type")
+            i += 1 # Move to the next message if not a valid pair (example user message, followed by the cooldown system message)
 
     return conversation_list
 
 
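The rewritten get_history_setup_llm now emits strictly alternating user/AI pairs and skips anything that does not form one, where the old version raised ValueError on unexpected message types. A minimal sketch of the new behavior using plain dict messages (which the function accepts alongside objects exposing to_dict); the sample contents are illustrative:

memory_list = [
    {"type": "human", "content": "What is backprop?"},
    {"type": "ai", "content": "Backpropagation computes gradients..."},
    {"type": "human", "content": "Thanks!"},
    # a system message (e.g. the cooldown notice) breaks the pair:
    {"type": "system", "content": "You are in cooldown."},
    {"type": "ai", "content": "You're welcome!"},
]

conversation_list = get_history_setup_llm(memory_list)
# Only the first exchange survives: "Thanks!" is followed by a system
# message, so it never pairs, and by the time the final AI message is
# reached there is no user message directly before it.
assert conversation_list == [
    {"type": "user_message", "content": "What is backprop?"},
    {"type": "ai_message", "content": "Backpropagation computes gradients..."},
]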