import os
import time
from datetime import datetime
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# -- SETUP --
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
# Cache the model and tokenizer as a resource so they load once per process
# instead of on every Streamlit rerun.
@st.cache_resource
def load_model():
    model_id = "google/flan-t5-base"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    return tokenizer, model

tokenizer, model = load_model()
if "history" not in st.session_state:
st.session_state.history = []
st.session_state.summary = ""
# -- TEXT GENERATION FUNCTION --
def generate_text(prompt, max_new_tokens=150):
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
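# Generation above is greedy by default. If replies feel flat or repetitive,
# one option (a sketch, not used here) is to enable sampling:
#   outputs = model.generate(**inputs, max_new_tokens=max_new_tokens,
#                            do_sample=True, temperature=0.7, top_p=0.9)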
# -- HIGH-RISK FILTER --
TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]

def is_high_risk(text):
    return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
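# Substring matching is a deliberately blunt heuristic: it can flag benign
# text and will miss paraphrases. A word-boundary check such as
#   re.search(r"\bsuicide\b", text, re.IGNORECASE)
# would cut false positives, but no keyword list replaces real risk assessment.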
# -- STYLING --
st.markdown("""
<style>
body {
background-color: #111827;
color: #f3f4f6;
}
.stTextInput > div > div > input {
color: #f3f4f6;
}
</style>
""", unsafe_allow_html=True)
# -- HEADER --
st.title("π§ TARS.help")
st.markdown("### A minimal AI that listens, reflects, and replies.")
st.markdown(f"ποΈ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
# -- USER INPUT --
user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
# -- MAIN CHAT LOGIC --
if user_input:
    # Only the last two exchanges (four history entries) are fed back as
    # context, keeping the prompt well inside flan-t5's input window.
    context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
    with st.spinner("TARS is reflecting..."):
        time.sleep(1.2)
        if is_high_risk(user_input):
            response = "I'm really sorry you're feeling this way. You're not alone. Please talk to someone you trust or a mental health professional. 💙"
        else:
            prompt = f"Respond with empathy:\n{context}\nUser: {user_input}"
            response = generate_text(prompt, max_new_tokens=100)
    timestamp = datetime.now().strftime("%H:%M")
    st.session_state.history.append(("🧑 You", user_input, timestamp))
    st.session_state.history.append(("🤖 TARS", response, timestamp))
# -- DISPLAY CHAT --
st.markdown("## π¨οΈ Session")
for speaker, msg, time in st.session_state.history:
st.markdown(f"**{speaker} [{time}]:** {msg}")
# -- SUMMARY GENERATION --
if st.button("π§Ύ Generate Session Summary"):
convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
summary_prompt = f"Summarize this conversation in 2-3 thoughtful sentences:\n{convo}"
try:
summary = generate_text(summary_prompt, max_new_tokens=150)
st.session_state.summary = summary
except Exception as e:
st.error("β Summary generation failed.")
st.exception(e)
# -- DISPLAY SUMMARY --
if st.session_state.summary:
    st.markdown("### 🧠 Session Note")
    st.markdown(st.session_state.summary)
    st.download_button("📥 Download Summary", st.session_state.summary, file_name="tars_session.txt")
# -- FOOTER --
st.markdown("---")
st.caption("TARS is not a therapist but a quiet assistant that reflects with you.")