YALCINKAYA committed
Commit 931f180 · verified · 1 parent: 158c8e2

Update app.py

Files changed (1):
  1. app.py  +3 -3
app.py CHANGED
@@ -377,7 +377,7 @@ def generate_response(user_input, model_id):
     formatted_prompt = "\n".join([f"{m['role']}: {m['content']}" for m in func_caller])

     #prompt = user_input
-    device = accelerator.device # Automatically uses GPU or CPU based on accelerator setup
+    #device = accelerator.device # Automatically uses GPU or CPU based on accelerator setup

     generation_config = GenerationConfig(
         do_sample=(highest_label == "dialog continuation" or highest_label == "recommendation request"), # True if dialog continuation, else False
@@ -398,9 +398,9 @@ def generate_response(user_input, model_id):
     gpt_output = model.generate(gpt_inputs["input_ids"], max_new_tokens=50, generation_config=generation_config)
     final_response = tokenizer.decode(gpt_output[0], skip_special_tokens=True)
     # Extract AI's response only (omit the prompt)
-    ai_response2 = final_response.replace(reformulated_prompt, "").strip()
+    #ai_response2 = final_response.replace(reformulated_prompt, "").strip()
     ai_response = re.sub(re.escape(formatted_prompt), "", final_response, flags=re.IGNORECASE).strip()
-    ai_response = re.split(r'(?<=\w[.!?]) +', ai_response)
+    #ai_response = re.split(r'(?<=\w[.!?]) +', ai_response)
     ai_response = [s.strip() for s in re.split(r'(?<=\w[.!?]) +', ai_response) if s]

     # Encode the prompt and candidates
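
All three edits comment out statements rather than delete them. The removals in the second hunk also fix a real bug: old line 403 rebound ai_response to a list (re.split returns a list of strings), so the very next line, which calls re.split on ai_response again, would have raised a TypeError. Below is a minimal, self-contained sketch of the post-processing that remains after this commit; the formatted_prompt and final_response values are invented stand-ins for illustration, not taken from app.py.

import re

# Stand-in values (hypothetical); in app.py, formatted_prompt is built from
# the func_caller chat history and final_response from tokenizer.decode.
formatted_prompt = "user: recommend a movie"
final_response = "user: recommend a movie You might enjoy Arrival. It is thoughtful sci-fi!"

# Strip the echoed prompt from the decoded output, case-insensitively.
ai_response = re.sub(re.escape(formatted_prompt), "", final_response, flags=re.IGNORECASE).strip()

# Sentence split: the lookbehind matches a word character followed by ., ! or ?,
# so the split consumes only the spaces after the punctuation.
ai_response = [s.strip() for s in re.split(r'(?<=\w[.!?]) +', ai_response) if s]

print(ai_response)  # ['You might enjoy Arrival.', 'It is thoughtful sci-fi!']

Because the lookbehind is zero-width, each sentence keeps its terminal punctuation, and the trailing "if s" filter drops any empty fragments left over after stripping the prompt.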
 
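For context, the unchanged GenerationConfig line in the first hunk ties sampling to an intent label: sampling is enabled only for "dialog continuation" and "recommendation request", and greedy decoding is used otherwise. A minimal sketch of that pattern, assuming highest_label comes from an upstream intent classifier that is not shown in this diff:

from transformers import GenerationConfig

highest_label = "dialog continuation"  # hypothetical example value

# Equivalent to the 'or' chain in app.py: sample only for open-ended intents,
# fall back to deterministic (greedy) decoding for everything else.
generation_config = GenerationConfig(
    do_sample=highest_label in ("dialog continuation", "recommendation request"),
)

print(generation_config.do_sample)  # True

Tuple membership is just an idiomatic equivalent of the two == comparisons joined by or in the original line; the behavior is identical.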