Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -9,7 +9,7 @@ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
|
|
9 |
|
10 |
@st.cache_resource
|
11 |
def load_respondent():
|
12 |
-
model_id = "
|
13 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
14 |
model = AutoModelForCausalLM.from_pretrained(model_id)
|
15 |
return pipeline("text-generation", model=model, tokenizer=tokenizer)
|
@@ -49,12 +49,12 @@ user_input = st.text_input("How are you feeling today?", placeholder="Start typi
|
|
49 |
|
50 |
# -- REPLY FUNCTION --
|
51 |
def generate_reply(user_input, context):
|
52 |
-
prompt = f"""You are a
|
53 |
|
54 |
{context}
|
55 |
User: {user_input}
|
56 |
AI:"""
|
57 |
-
response = generator(prompt, max_new_tokens=
|
58 |
return response.split("AI:")[-1].strip()
|
59 |
|
60 |
# -- CONVERSATION FLOW --
|
|
|
9 |
|
10 |
@st.cache_resource
def load_respondent():
    """Build and cache the text-generation pipeline (one load per session).

    Returns:
        A transformers ``pipeline("text-generation", ...)`` wrapping the
        tokenizer and causal-LM weights for the chosen model.

    NOTE(review): `st.cache_resource` memoizes the pipeline across reruns so
    the model is downloaded/loaded only once per Streamlit session.
    """
    # Per the commit message: switched to a coherent, safe small model.
    model_id = "microsoft/phi-2"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
49 |
|
50 |
# -- REPLY FUNCTION --
|
51 |
def generate_reply(user_input, context):
    """Generate an empathetic assistant reply to *user_input*.

    Args:
        user_input: The user's latest message.
        context: Prior conversation text spliced into the prompt.

    Returns:
        The model's continuation after the final ``AI:`` marker, stripped
        of surrounding whitespace.
    """
    prompt = f"""You are a calm, helpful AI assistant who supports users emotionally. Be kind and thoughtful in your reply.

{context}
User: {user_input}
AI:"""
    # do_sample=True is required for temperature to take effect; without it
    # the pipeline decodes greedily and silently ignores temperature.
    response = generator(
        prompt, max_new_tokens=80, temperature=0.7, do_sample=True
    )[0]['generated_text']
    # The pipeline echoes the prompt, so keep only the text after the last
    # "AI:" marker (the freshly generated continuation).
    return response.split("AI:")[-1].strip()
|
59 |
|
60 |
# -- CONVERSATION FLOW --
|