YALCINKAYA committed
Commit bfe1386 · 1 Parent(s): 8c114ae

remove User: Assistant: from response

Files changed (1)
  app.py +9 -6
app.py CHANGED
@@ -75,20 +75,23 @@ def generate_response(user_input, model_id):
     prompt = user_input
     inputs = tokenizer([prompt], return_tensors="pt")
 
-    generation_config = GenerationConfig(
+    generation_config = GenerationConfig(
         penalty_alpha=0.6,
         do_sample=True,
-        top_k=5,
-        temperature=0.6,
+        top_p=0.2,
+        top_k=50,
+        temperature=0.3,
         repetition_penalty=1.2,
-        max_new_tokens=60, # Adjust as necessary
+        max_new_tokens=60,
         pad_token_id=tokenizer.eos_token_id,
-        stop_sequences=["User:", "Assistant:"],
+        stop_sequences=["User:", "Assistant:", "\n"],
     )
 
     outputs = model.generate(**inputs, generation_config=generation_config)
     response = tokenizer.decode(outputs[:, inputs['input_ids'].shape[-1]:][0], skip_special_tokens=True)
-    return response.strip().split("Assistant:")[-1].strip()
+    cleaned_response = response.replace("User:", "").replace("Assistant:", "").strip()
+    return cleaned_response.strip().split("\n")[0] # Keep only the first line of response
+    #return response.strip().split("Assistant:")[-1].strip()
 
 def formatted_prompt(question) -> str:
     return f"<|startoftext|>User: {question}\nAssistant:"
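For context, the post-processing this commit introduces can be exercised on its own. Below is a minimal standalone sketch; the raw string is a hypothetical decoded model output (not taken from the repo) that still echoes the chat-template role markers:

    # Hypothetical decoded output: echoes the role markers plus a follow-up turn.
    raw = "Assistant: Paris is the capital of France.\nUser: Thanks!"

    # Same cleanup as the new generate_response: strip the role markers,
    # then keep only the first line so any echoed extra turn is discarded.
    cleaned = raw.replace("User:", "").replace("Assistant:", "").strip()
    print(cleaned.split("\n")[0])  # Paris is the capital of France.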