CompanAIon / app.py
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the GPT-2 model and tokenizer once at startup
# (the base "gpt2" checkpoint is assumed here; the Space may load a different model)
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")


def generate_response(user_input):
    # Empathy-focused prompt to guide the bot
    response_prompt = (
        f"The user has shared the following: '{user_input}'. "
        "Respond with empathy, compassion, and understanding. "
        "Acknowledge their sadness and offer comforting, reassuring words. "
        "Show that you care and validate their feelings without giving unsolicited advice."
    )

    # Tokenize the prompt and generate a continuation with the GPT-2 model.
    # do_sample=True is required for temperature/top_k to have any effect.
    input_ids = gpt2_tokenizer.encode(response_prompt, return_tensors="pt")
    response_ids = gpt2_model.generate(
        input_ids,
        max_length=300,
        do_sample=True,
        temperature=0.85,
        top_k=50,
        repetition_penalty=1.2,
        num_return_sequences=1,
        pad_token_id=gpt2_tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens so the prompt is never echoed back
    generated_ids = response_ids[0][input_ids.shape[-1]:]
    cleaned_response = gpt2_tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
    return cleaned_response
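

# Minimal usage sketch (an assumption, not part of the original file): a plain
# command-line loop that feeds user text to generate_response. The actual Space
# presumably wires this function into its own UI instead of a terminal loop.
if __name__ == "__main__":
    while True:
        user_text = input("You: ").strip()
        if not user_text:
            break
        print("CompanAIon:", generate_response(user_text))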