Divymakesml committed on
Commit
25a7813
·
verified ·
1 Parent(s): 4798805

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -43
app.py CHANGED
@@ -2,29 +2,35 @@ import os
2
  import time
3
  from datetime import datetime
4
  import streamlit as st
5
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
6
 
7
  # -- SETUP --
8
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
9
 
10
  @st.cache_resource
11
- def load_respondent():
12
- model_id = "mistralai/Mistral-7B-Instruct-v0.1"
13
  tokenizer = AutoTokenizer.from_pretrained(model_id)
14
- model = AutoModelForCausalLM.from_pretrained(
15
- model_id,
16
- device_map="auto",
17
- trust_remote_code=True,
18
- torch_dtype="auto"
19
- )
20
- return pipeline("text-generation", model=model, tokenizer=tokenizer)
21
 
22
- generator = load_respondent()
23
 
24
  if "history" not in st.session_state:
25
  st.session_state.history = []
26
  st.session_state.summary = ""
27
 
 
 
 
 
 
 
 
 
 
 
 
28
  # -- STYLING --
29
  st.markdown("""
30
  <style>
@@ -43,26 +49,10 @@ st.title("🧠 TARS.help")
43
  st.markdown("### A minimal AI that listens, reflects, and replies.")
44
  st.markdown(f"πŸ—“οΈ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
45
 
46
- # -- SAFETY FILTER --
47
- TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
48
-
49
- def is_high_risk(text):
50
- return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
51
-
52
- # -- INPUT --
53
  user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
54
 
55
- # -- REPLY FUNCTION --
56
- def generate_reply(user_input, context):
57
- prompt = f"""You are a kind and empathetic AI assistant. Respond thoughtfully based on the following conversation:
58
-
59
- {context}
60
- User: {user_input}
61
- AI:"""
62
- response = generator(prompt, max_new_tokens=150, temperature=0.7)[0]['generated_text']
63
- return response.split("AI:")[-1].strip()
64
-
65
- # -- CONVERSATION FLOW --
66
  if user_input:
67
  context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
68
  with st.spinner("TARS is reflecting..."):
@@ -70,32 +60,29 @@ if user_input:
70
  if is_high_risk(user_input):
71
  response = "I'm really sorry you're feeling this way. You're not alone β€” please talk to someone you trust or a mental health professional. πŸ’™"
72
  else:
73
- full_context = context + f"\nUser: {user_input}"
74
- response = generate_reply(user_input, context)
75
- timestamp = datetime.now().strftime("%H:%M")
76
- st.session_state.history.append(("🧍 You", user_input, timestamp))
77
- st.session_state.history.append(("πŸ€– TARS", response, timestamp))
78
 
79
- # -- DISPLAY HISTORY --
80
  st.markdown("## πŸ—¨οΈ Session")
81
  for speaker, msg, time in st.session_state.history:
82
  st.markdown(f"**{speaker} [{time}]:** {msg}")
83
 
84
- # -- SESSION SUMMARY --
85
  if st.button("🧾 Generate Session Summary"):
86
  convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
87
- prompt = f"""Summarize the emotional tone and key themes from this conversation in 3 sentences:
88
-
89
- {convo}
90
-
91
- Summary:"""
92
  try:
93
- output = generator(prompt, max_new_tokens=200, temperature=0.5)[0]['generated_text']
94
- st.session_state.summary = output.split("Summary:")[-1].strip()
95
  except Exception as e:
96
  st.error("❌ Summary generation failed.")
97
  st.exception(e)
98
 
 
99
  if st.session_state.summary:
100
  st.markdown("### 🧠 Session Note")
101
  st.markdown(st.session_state.summary)
 
2
  import time
3
  from datetime import datetime
4
  import streamlit as st
5
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
6
 
7
  # -- SETUP --
8
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
9
 
10
  @st.cache_resource
11
+ def load_model():
12
+ model_id = "google/flan-t5-base"
13
  tokenizer = AutoTokenizer.from_pretrained(model_id)
14
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
15
+ return tokenizer, model
 
 
 
 
 
16
 
17
+ tokenizer, model = load_model()
18
 
19
  if "history" not in st.session_state:
20
  st.session_state.history = []
21
  st.session_state.summary = ""
22
 
23
+ # -- TEXT GENERATION FUNCTION --
24
+ def generate_text(prompt, max_new_tokens=150):
25
+ inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
26
+ outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
27
+ return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
28
+
29
+ # -- HIGH-RISK FILTER --
30
+ TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
31
+ def is_high_risk(text):
32
+ return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
33
+
34
  # -- STYLING --
35
  st.markdown("""
36
  <style>
 
49
  st.markdown("### A minimal AI that listens, reflects, and replies.")
50
  st.markdown(f"πŸ—“οΈ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
51
 
52
+ # -- USER INPUT --
 
 
 
 
 
 
53
  user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
54
 
55
+ # -- MAIN CHAT LOGIC --
 
 
 
 
 
 
 
 
 
 
56
  if user_input:
57
  context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
58
  with st.spinner("TARS is reflecting..."):
 
60
  if is_high_risk(user_input):
61
  response = "I'm really sorry you're feeling this way. You're not alone β€” please talk to someone you trust or a mental health professional. πŸ’™"
62
  else:
63
+ prompt = f"Respond with empathy:\n{context}\nUser: {user_input}"
64
+ response = generate_text(prompt, max_new_tokens=100)
65
+ timestamp = datetime.now().strftime("%H:%M")
66
+ st.session_state.history.append(("🧍 You", user_input, timestamp))
67
+ st.session_state.history.append(("πŸ€– TARS", response, timestamp))
68
 
69
+ # -- DISPLAY CHAT --
70
  st.markdown("## πŸ—¨οΈ Session")
71
  for speaker, msg, time in st.session_state.history:
72
  st.markdown(f"**{speaker} [{time}]:** {msg}")
73
 
74
+ # -- SUMMARY GENERATION --
75
  if st.button("🧾 Generate Session Summary"):
76
  convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
77
+ summary_prompt = f"Summarize this conversation in 2-3 thoughtful sentences:\n{convo}"
 
 
 
 
78
  try:
79
+ summary = generate_text(summary_prompt, max_new_tokens=150)
80
+ st.session_state.summary = summary
81
  except Exception as e:
82
  st.error("❌ Summary generation failed.")
83
  st.exception(e)
84
 
85
+ # -- DISPLAY SUMMARY --
86
  if st.session_state.summary:
87
  st.markdown("### 🧠 Session Note")
88
  st.markdown(st.session_state.summary)