Divymakesml committed on
Commit
eb4f9f9
·
verified ·
1 Parent(s): a2cf2cd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -9,7 +9,7 @@ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
9
 
10
  @st.cache_resource
11
  def load_respondent():
12
- model_id = "tiiuae/falcon-rw-1b" # better lightweight model
13
  tokenizer = AutoTokenizer.from_pretrained(model_id)
14
  model = AutoModelForCausalLM.from_pretrained(model_id)
15
  return pipeline("text-generation", model=model, tokenizer=tokenizer)
@@ -49,12 +49,12 @@ user_input = st.text_input("How are you feeling today?", placeholder="Start typi
49
 
50
  # -- REPLY FUNCTION --
51
  def generate_reply(user_input, context):
52
- prompt = f"""You are a quiet, non-judgmental AI assistant. Listen carefully and respond with clarity and kindness.
53
 
54
  {context}
55
  User: {user_input}
56
  AI:"""
57
- response = generator(prompt, max_new_tokens=60, temperature=0.7)[0]['generated_text']
58
  return response.split("AI:")[-1].strip()
59
 
60
  # -- CONVERSATION FLOW --
 
9
 
10
  @st.cache_resource
11
  def load_respondent():
12
+ model_id = "microsoft/phi-2" # switched to a coherent, safe small model
13
  tokenizer = AutoTokenizer.from_pretrained(model_id)
14
  model = AutoModelForCausalLM.from_pretrained(model_id)
15
  return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
49
 
50
  # -- REPLY FUNCTION --
51
  def generate_reply(user_input, context):
52
+ prompt = f"""You are a calm, helpful AI assistant who supports users emotionally. Be kind and thoughtful in your reply.
53
 
54
  {context}
55
  User: {user_input}
56
  AI:"""
57
+ response = generator(prompt, max_new_tokens=80, temperature=0.7)[0]['generated_text']
58
  return response.split("AI:")[-1].strip()
59
 
60
  # -- CONVERSATION FLOW --