Divymakesml committed (verified)
Commit 0eefba5 · 1 Parent(s): 4208dd4

Update app.py

Files changed (1): app.py (+42 -39)
app.py CHANGED
@@ -2,37 +2,24 @@ import os
 import time
 from datetime import datetime
 import streamlit as st
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # -- SETUP --
 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
 
 @st.cache_resource
-def load_pipeline():
-    model_id = "tiiuae/falcon-7b-instruct"
-    pipe = pipeline(
-        "text-generation",
-        model=AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True),
-        tokenizer=AutoTokenizer.from_pretrained(model_id, trust_remote_code=True),
-        device_map="auto"
-    )
-    return pipe
-
-generator = load_pipeline()
+def load_respondent():
+    model_id = "microsoft/phi-2"  # switched to a coherent, safe small model
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(model_id)
+    return pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+generator = load_respondent()
 
 if "history" not in st.session_state:
     st.session_state.history = []
     st.session_state.summary = ""
 
-# -- UTILS --
-TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
-def is_high_risk(text):
-    return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
-
-def get_reply(prompt, max_new_tokens=150, temperature=0.7):
-    out = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]["generated_text"]
-    return out.split("AI:")[-1].strip() if "AI:" in out else out.strip()
-
 # -- STYLING --
 st.markdown("""
 <style>
@@ -51,38 +38,54 @@ st.title("🧠 TARS.help")
 st.markdown("### A minimal AI that listens, reflects, and replies.")
 st.markdown(f"🗓️ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
 
-# -- USER INPUT --
+# -- HIGH-RISK PHRASE FILTER --
+TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
+
+def is_high_risk(text):
+    return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
+
+# -- INPUT --
 user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
 
-# -- MAIN FLOW --
+# -- REPLY FUNCTION --
+def generate_reply(user_input, context):
+    prompt = f"""You are a calm, helpful AI assistant who supports users emotionally. Be kind and thoughtful in your reply.
+
+{context}
+User: {user_input}
+AI:"""
+    response = generator(prompt, max_new_tokens=80, temperature=0.7)[0]['generated_text']
+    return response.split("AI:")[-1].strip()
+
+# -- CONVERSATION FLOW --
 if user_input:
     context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
     with st.spinner("TARS is reflecting..."):
-        time.sleep(1)
+        time.sleep(0.5)
         if is_high_risk(user_input):
             response = "I'm really sorry you're feeling this way. You're not alone — please talk to someone you trust or a mental health professional. 💙"
         else:
-            prompt = f"You are a kind and calm AI assistant.\n{context}\nUser: {user_input}\nAI:"
-            response = get_reply(prompt, max_new_tokens=150)
-        timestamp = datetime.now().strftime("%H:%M")
-        st.session_state.history.append(("🧍 You", user_input, timestamp))
-        st.session_state.history.append(("🤖 TARS", response, timestamp))
+            response = generate_reply(user_input, context)
+    timestamp = datetime.now().strftime("%H:%M")
+    st.session_state.history.append(("🧍 You", user_input, timestamp))
+    st.session_state.history.append(("🤖 TARS", response, timestamp))
 
-# -- CHAT DISPLAY --
+# -- DISPLAY HISTORY --
 st.markdown("## 🗨️ Session")
 for speaker, msg, time in st.session_state.history:
     st.markdown(f"**{speaker} [{time}]:** {msg}")
 
-# -- SUMMARY --
+# -- SESSION SUMMARY --
 if st.button("🧾 Generate Session Summary"):
     convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
-    prompt = f"Summarize this conversation in 3 reflective sentences:\n{convo}\nSummary:"
-    try:
-        summary = get_reply(prompt, max_new_tokens=200, temperature=0.5)
-        st.session_state.summary = summary
-    except Exception as e:
-        st.error("Summary generation failed.")
-        st.exception(e)
+    prompt = f"""You are summarizing a thoughtful conversation between a user and an AI assistant. Write a kind, reflective note based on this interaction.
+
+Conversation:
+{convo}
+
+Summary:"""
+    summary = generator(prompt, max_new_tokens=100, temperature=0.5)[0]['generated_text']
+    st.session_state.summary = summary.split("Summary:")[-1].strip()
 
 if st.session_state.summary:
     st.markdown("### 🧠 Session Note")
@@ -91,4 +94,4 @@ if st.session_state.summary:
 
 # -- FOOTER --
 st.markdown("---")
-st.caption("TARS is not a therapist, but a quiet assistant that reflects with you.")
+st.caption("TARS is not a therapist but a friend. Just a quiet assistant that reflects with you.")
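
A quick way to exercise the new loader outside Streamlit is a standalone smoke test. The sketch below is not part of this commit: the file name smoke_test.py is hypothetical, and it assumes the microsoft/phi-2 weights can be downloaded and fit in memory. It also passes do_sample=True, which app.py omits; without it, transformers decodes greedily and the temperature value has no effect.

# smoke_test.py -- hypothetical standalone check, not part of this commit
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Mirror app.py's prompt shape and its split-on-"AI:" parsing.
prompt = "You are a calm, helpful AI assistant.\n\nUser: I had a rough day.\nAI:"
out = generator(prompt, max_new_tokens=80, do_sample=True, temperature=0.7)[0]["generated_text"]
print(out.split("AI:")[-1].strip())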
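
The split("AI:") parsing exists because the text-generation pipeline returns the prompt plus the completion by default. Passing return_full_text=False (a standard option of the transformers text-generation pipeline) returns only the completion, so a variant of generate_reply could skip the parsing entirely. A sketch against the generator defined in app.py, not a change made in this commit:

# Sketch: same contract as app.py's generate_reply, without the prompt echo.
def generate_reply(user_input, context):
    prompt = f"""You are a calm, helpful AI assistant who supports users emotionally. Be kind and thoughtful in your reply.

{context}
User: {user_input}
AI:"""
    out = generator(prompt, max_new_tokens=80, do_sample=True, temperature=0.7,
                    return_full_text=False)[0]["generated_text"]
    return out.strip()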