Update app.py
app.py CHANGED
@@ -37,7 +37,10 @@ def generate_story(theme):
 
 
 def generate_response(user_input):
-
+    # Update the prompt to be more natural and empathetic.
+    response_prompt = f"User shares: '{user_input}'. Respond with empathy, encouragement, and reassurance. Be kind and understanding, offering support for their situation."
+
+    # Generate the response using the model (this could be GPT-2 or DialoGPT).
     input_ids = gpt2_tokenizer.encode(response_prompt, return_tensors='pt')
     response_ids = gpt2_model.generate(
         input_ids,
@@ -47,8 +50,13 @@ def generate_response(user_input):
         repetition_penalty=1.2,
         num_return_sequences=1
     )
+
+    # Decode and return the response
     response = gpt2_tokenizer.decode(response_ids[0], skip_special_tokens=True)
-
+
+    # Clean up the response to avoid the model repeating the prompt
+    cleaned_response = response.replace(f"You are a compassionate, kind,and empathetic support bot. A user has shared their feelings: '{user_input}'. Respond with empathy, encouragement, and motivation.", "").strip()
+    return cleaned_response
 
 
 
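For context, the hunk above references a gpt2_tokenizer and gpt2_model that are created elsewhere in app.py, and several generate() arguments sit in the unchanged lines between the two hunks. Below is a minimal, self-contained sketch of how the updated generate_response() fits together; the checkpoint name ("gpt2"), max_length, and sampling flags are assumptions rather than the Space's actual configuration, and the cleanup step here strips the new prompt for illustration instead of the old prompt text used in the committed code.

# Sketch only: setup and generation arguments outside the diff hunks are assumed.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")   # assumed checkpoint
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")

def generate_response(user_input):
    # Prompt wording as added in the diff above.
    response_prompt = f"User shares: '{user_input}'. Respond with empathy, encouragement, and reassurance. Be kind and understanding, offering support for their situation."

    input_ids = gpt2_tokenizer.encode(response_prompt, return_tensors='pt')
    response_ids = gpt2_model.generate(
        input_ids,
        max_length=150,                 # assumed; the real value is in the unchanged lines
        do_sample=True,                 # assumed
        pad_token_id=gpt2_tokenizer.eos_token_id,
        repetition_penalty=1.2,
        num_return_sequences=1,
    )

    response = gpt2_tokenizer.decode(response_ids[0], skip_special_tokens=True)

    # Strip the echoed prompt; here we remove the prompt actually used above.
    cleaned_response = response.replace(response_prompt, "").strip()
    return cleaned_response

if __name__ == "__main__":
    print(generate_response("I'm feeling overwhelmed at work lately."))

Stripping the echoed prompt matters because GPT-2's generate() output begins with the input tokens, so the decoded text repeats the prompt before the continuation; whichever string is passed to replace() should match the prompt actually used for generation.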