Pijush2023 committed on
Commit
40502e4
·
verified ·
1 Parent(s): bbb8b3d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -402,13 +402,17 @@ Answer:
402
  """
403
 
404
 
 
 
405
def clean_response(response_text):
    """Strip document-metadata artifacts from a raw model response.

    Removes ``[Document(...),`` fragments and any ``{...}`` blocks via a
    single alternation pattern, then collapses runs of whitespace into
    single spaces and trims the result.
    """
    # DOTALL lets the metadata span multiple lines; the alternation removes
    # either an opening "[Document(...)," fragment or a brace-delimited dict.
    metadata_pattern = r'\[Document\(.*?\),?|\{.*?\}'
    without_metadata = re.sub(metadata_pattern, '', response_text, flags=re.DOTALL)
    # Normalize the whitespace gaps left behind by the removals.
    return re.sub(r'\s+', ' ', without_metadata).strip()
410
 
411
-
412
  def generate_answer(message, choice, retrieval_mode, selected_model):
413
  logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
414
 
@@ -449,10 +453,10 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
449
  logging.debug(f"Phi-3.5 Prompt: {prompt}")
450
 
451
  response = selected_model(prompt, **{
452
- "max_new_tokens": 300,
453
  "return_full_text": False,
454
- "temperature": 0.5,
455
- "do_sample": False,
456
  })
457
 
458
  if response:
 
402
  """
403
 
404
 
405
+ import re
406
+
407
def clean_response(response_text):
    """Remove retrieval-metadata artifacts from a model response.

    Sequentially strips ``Document(metadata=...)`` fragments,
    ``page_content="..."`` remnants, and any bracketed spans, then
    collapses whitespace and trims the result.
    """
    # Order matters: metadata first, then page_content leftovers, then
    # anything remaining inside square brackets.
    patterns = (
        r'Document\(metadata=.*?\),?\s*',   # "Document(metadata=...)" entries
        r'page_content=".*?"\),?',          # 'page_content="..."' remnants
        r'\[.*?\]',                         # any content left in brackets
    )
    cleaned = response_text
    for pattern in patterns:
        cleaned = re.sub(pattern, '', cleaned, flags=re.DOTALL)
    # Normalize the whitespace gaps produced by the removals.
    return re.sub(r'\s+', ' ', cleaned).strip()
415
 
 
416
  def generate_answer(message, choice, retrieval_mode, selected_model):
417
  logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
418
 
 
453
  logging.debug(f"Phi-3.5 Prompt: {prompt}")
454
 
455
  response = selected_model(prompt, **{
456
+ "max_new_tokens": 512, # Increased to handle longer responses
457
  "return_full_text": False,
458
+ "temperature": 0.7, # Adjusted to avoid cutting off
459
+ "do_sample": True, # Allow sampling to increase response diversity
460
  })
461
 
462
  if response: