Divymakesml committed on
Commit
4e80759
·
verified ·
1 Parent(s): 9f2d402

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -37
app.py CHANGED
@@ -1,56 +1,40 @@
1
- import os
2
- import time
3
- from datetime import datetime
4
  import streamlit as st
 
 
5
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
6
 
7
  # -- SETUP --
8
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
9
 
10
  @st.cache_resource
11
- def load_pipeline():
12
  model_id = "tiiuae/falcon-7b-instruct"
13
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
14
  model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
15
  return pipeline("text-generation", model=model, tokenizer=tokenizer)
16
 
17
- generator = load_pipeline()
18
 
19
  if "history" not in st.session_state:
20
  st.session_state.history = []
21
  st.session_state.summary = ""
22
 
23
- # -- UTILS --
24
  TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
 
25
  def is_high_risk(text):
26
  return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
27
 
28
- def get_reply(prompt, max_new_tokens=150, temperature=0.7):
29
  output = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]["generated_text"]
30
  return output.split("AI:")[-1].strip() if "AI:" in output else output.strip()
31
 
32
- # -- STYLING --
33
- st.markdown("""
34
- <style>
35
- body {
36
- background-color: #111827;
37
- color: #f3f4f6;
38
- }
39
- .stTextInput > div > div > input {
40
- color: #f3f4f6;
41
- }
42
- </style>
43
- """, unsafe_allow_html=True)
44
-
45
- # -- HEADER --
46
  st.title("🧠 TARS.help")
47
- st.markdown("### A minimal AI that listens, reflects, and replies.")
48
  st.markdown(f"πŸ—“οΈ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
49
 
50
- # -- USER INPUT --
51
  user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
52
 
53
- # -- MAIN CHAT FLOW --
54
  if user_input:
55
  context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
56
  with st.spinner("TARS is reflecting..."):
@@ -58,34 +42,30 @@ if user_input:
58
  if is_high_risk(user_input):
59
  response = "I'm really sorry you're feeling this way. You're not alone β€” please talk to someone you trust or a mental health professional. πŸ’™"
60
  else:
61
- prompt = f"You are a compassionate AI therapist.\n{context}\nUser: {user_input}\nAI:"
62
- response = get_reply(prompt)
63
- timestamp = datetime.now().strftime("%H:%M")
64
- st.session_state.history.append(("🧍 You", user_input, timestamp))
65
- st.session_state.history.append(("πŸ€– TARS", response, timestamp))
66
 
67
- # -- DISPLAY CHAT --
68
  st.markdown("## πŸ—¨οΈ Session")
69
  for speaker, msg, time in st.session_state.history:
70
  st.markdown(f"**{speaker} [{time}]:** {msg}")
71
 
72
- # -- SUMMARY GENERATION --
73
  if st.button("🧾 Generate Session Summary"):
74
  convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
75
- summary_prompt = f"Summarize the tone and content of this therapy session in 3 thoughtful sentences:\n{convo}\nSummary:"
76
  try:
77
- summary = get_reply(summary_prompt, max_new_tokens=200, temperature=0.5)
78
  st.session_state.summary = summary
79
  except Exception as e:
80
- st.error("❌ Summary generation failed.")
81
  st.exception(e)
82
 
83
- # -- DISPLAY SUMMARY --
84
  if st.session_state.summary:
85
  st.markdown("### 🧠 Session Note")
86
  st.markdown(st.session_state.summary)
87
  st.download_button("πŸ“₯ Download Summary", st.session_state.summary, file_name="tars_session.txt")
88
 
89
- # -- FOOTER --
90
  st.markdown("---")
91
- st.caption("TARS is not a therapist, but a quiet assistant that reflects with you.")
 
 
 
 
1
  import streamlit as st
2
+ from datetime import datetime
3
+ import time
4
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
5
+ import os
6
 
7
# -- SETUP --
# Force the pure-Python protobuf backend so the app does not depend on the
# compiled protobuf extension being present/compatible in the host image.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
9
 
10
@st.cache_resource
def load_model():
    """Build the Falcon-7B-Instruct text-generation pipeline.

    Decorated with ``st.cache_resource`` so the (expensive) download and
    model construction happen once per server process; subsequent script
    reruns receive the cached pipeline object.
    """
    repo = "tiiuae/falcon-7b-instruct"
    tok = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
    lm = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
    return pipeline("text-generation", model=lm, tokenizer=tok)
16
 
17
# Fetch the (cached) generation pipeline for this script run.
generator = load_model()

# First run of a browser session: start with an empty chat log and no summary.
# Both keys are created together so later code can read them unconditionally.
if "history" not in st.session_state:
    st.session_state["history"] = []
    st.session_state["summary"] = ""
22
 
 
23
# Phrases checked verbatim (after lowercasing) as crisis indicators.
TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]

def is_high_risk(text):
    """Return True if *text* contains any crisis trigger phrase (case-insensitive)."""
    lowered = text.lower()
    for phrase in TRIGGER_PHRASES:
        if phrase in lowered:
            return True
    return False
27
 
28
def get_response(prompt, max_new_tokens=150, temperature=0.7):
    """Run the prompt through the global ``generator`` pipeline and return the reply.

    If the generated text contains an "AI:" marker, only the text after the
    last marker is returned, so the echoed prompt/context is stripped.
    """
    result = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
    text = result[0]["generated_text"]
    if "AI:" in text:
        text = text.split("AI:")[-1]
    return text.strip()
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
# -- HEADER --
# NOTE(review): emoji literals repaired from mojibake introduced by the
# scraped source ("🧠" etc. were UTF-8 bytes decoded as cp1252).
st.title("🧠 TARS.help")
st.markdown("### A quiet AI that reflects and replies.")
# Today's date plus the number of completed exchanges; history stores two
# rows (user + AI) per exchange, hence the // 2.
st.markdown(f"🗓️ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")

# -- USER INPUT --
user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
37
 
 
38
  if user_input:
39
  context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
40
  with st.spinner("TARS is reflecting..."):
 
42
  if is_high_risk(user_input):
43
  response = "I'm really sorry you're feeling this way. You're not alone β€” please talk to someone you trust or a mental health professional. πŸ’™"
44
  else:
45
+ prompt = f"You are a calm, empathetic AI assistant.\n{context}\nUser: {user_input}\nAI:"
46
+ response = get_response(prompt)
47
+ timestamp = datetime.now().strftime("%H:%M")
48
+ st.session_state.history.append(("🧍 You", user_input, timestamp))
49
+ st.session_state.history.append(("πŸ€– TARS", response, timestamp))
50
 
 
51
# -- DISPLAY CHAT --
# Render the whole session transcript. Loop variable renamed from `time`
# to `sent_at`: the original name shadowed the imported stdlib `time`
# module at module scope. Emoji repaired from mojibake in the source.
st.markdown("## 🗨️ Session")
for speaker, msg, sent_at in st.session_state.history:
    st.markdown(f"**{speaker} [{sent_at}]:** {msg}")
54
 
 
55
# -- SUMMARY GENERATION --
# Emoji in the button label repaired from mojibake in the scraped source.
if st.button("🧾 Generate Session Summary"):
    # Flatten the whole session into "speaker: message" lines for the model.
    convo = "\n".join(f"{s}: {m}" for s, m, _ in st.session_state.history)
    prompt = f"Summarize the emotional tone and themes in this conversation:\n{convo}\nSummary:"
    try:
        # Lower temperature than chat replies for a more focused summary.
        summary = get_response(prompt, max_new_tokens=200, temperature=0.5)
    except Exception as e:  # surface model/runtime failures instead of crashing the app
        st.error("Summary generation failed.")
        st.exception(e)
    else:
        # Only persist the summary when generation succeeded.
        st.session_state.summary = summary
64
 
 
65
# -- DISPLAY SUMMARY --
# Shown only once a summary has been generated (empty string is falsy).
# Emoji repaired from mojibake in the scraped source.
if st.session_state.summary:
    st.markdown("### 🧠 Session Note")
    st.markdown(st.session_state.summary)
    st.download_button("📥 Download Summary", st.session_state.summary, file_name="tars_session.txt")

# -- FOOTER --
st.markdown("---")
st.caption("TARS is not a therapist. If you're in crisis, please seek help from a professional.")