Divymakesml committed on
Commit 9f2d402 · verified · 1 Parent(s): 0eefba5

Update app.py

Files changed (1)
app.py +35 -41
app.py CHANGED
@@ -2,24 +2,33 @@ import os
  import time
  from datetime import datetime
  import streamlit as st
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
  # -- SETUP --
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
 
  @st.cache_resource
- def load_respondent():
-     model_id = "microsoft/phi-2"  # switched to a coherent, safe small model
-     tokenizer = AutoTokenizer.from_pretrained(model_id)
-     model = AutoModelForCausalLM.from_pretrained(model_id)
+ def load_pipeline():
+     model_id = "tiiuae/falcon-7b-instruct"
+     tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+     model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
      return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
- generator = load_respondent()
+ generator = load_pipeline()
 
  if "history" not in st.session_state:
      st.session_state.history = []
      st.session_state.summary = ""
 
+ # -- UTILS --
+ TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
+ def is_high_risk(text):
+     return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
+
+ def get_reply(prompt, max_new_tokens=150, temperature=0.7):
+     output = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]["generated_text"]
+     return output.split("AI:")[-1].strip() if "AI:" in output else output.strip()
+
  # -- STYLING --
  st.markdown("""
  <style>
@@ -38,55 +47,40 @@ st.title("🧠 TARS.help")
  st.markdown("### A minimal AI that listens, reflects, and replies.")
  st.markdown(f"🗓️ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
 
- # -- HIGH-RISK PHRASE FILTER --
- TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
-
- def is_high_risk(text):
-     return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
-
- # -- INPUT --
+ # -- USER INPUT --
  user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
 
- # -- REPLY FUNCTION --
- def generate_reply(user_input, context):
-     prompt = f"""You are a calm, helpful AI assistant who supports users emotionally. Be kind and thoughtful in your reply.
-
- {context}
- User: {user_input}
- AI:"""
-     response = generator(prompt, max_new_tokens=80, temperature=0.7)[0]['generated_text']
-     return response.split("AI:")[-1].strip()
-
- # -- CONVERSATION FLOW --
+ # -- MAIN CHAT FLOW --
  if user_input:
      context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
      with st.spinner("TARS is reflecting..."):
-         time.sleep(0.5)
+         time.sleep(1)
          if is_high_risk(user_input):
              response = "I'm really sorry you're feeling this way. You're not alone — please talk to someone you trust or a mental health professional. 💙"
          else:
-             response = generate_reply(user_input, context)
-         timestamp = datetime.now().strftime("%H:%M")
-         st.session_state.history.append(("🧍 You", user_input, timestamp))
-         st.session_state.history.append(("🤖 TARS", response, timestamp))
+             prompt = f"You are a compassionate AI therapist.\n{context}\nUser: {user_input}\nAI:"
+             response = get_reply(prompt)
+     timestamp = datetime.now().strftime("%H:%M")
+     st.session_state.history.append(("🧍 You", user_input, timestamp))
+     st.session_state.history.append(("🤖 TARS", response, timestamp))
 
- # -- DISPLAY HISTORY --
+ # -- DISPLAY CHAT --
  st.markdown("## 🗨️ Session")
  for speaker, msg, time in st.session_state.history:
      st.markdown(f"**{speaker} [{time}]:** {msg}")
 
- # -- SESSION SUMMARY --
+ # -- SUMMARY GENERATION --
  if st.button("🧾 Generate Session Summary"):
      convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
-     prompt = f"""You are summarizing a thoughtful conversation between a user and an AI assistant. Write a kind, reflective note based on this interaction.
-
- Conversation:
- {convo}
-
- Summary:"""
-     summary = generator(prompt, max_new_tokens=100, temperature=0.5)[0]['generated_text']
-     st.session_state.summary = summary.split("Summary:")[-1].strip()
+     summary_prompt = f"Summarize the tone and content of this therapy session in 3 thoughtful sentences:\n{convo}\nSummary:"
+     try:
+         summary = get_reply(summary_prompt, max_new_tokens=200, temperature=0.5)
+         st.session_state.summary = summary
+     except Exception as e:
+         st.error("❌ Summary generation failed.")
+         st.exception(e)
+
+ # -- DISPLAY SUMMARY --
  if st.session_state.summary:
      st.markdown("### 🧠 Session Note")
      st.markdown(st.session_state.summary)
@@ -94,4 +88,4 @@ if st.session_state.summary:
 
  # -- FOOTER --
  st.markdown("---")
- st.caption("TARS is not a therapist but a friend. Just a quiet assistant that reflects with you.")
+ st.caption("TARS is not a therapist, but a quiet assistant that reflects with you.")