import os
import time
from datetime import datetime
import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# -- SETUP --
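# Work around protobuf C-extension descriptor errors by forcing the pure-Python implementation.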
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
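# Cache the pipeline across Streamlit reruns so the 7B model is loaded only once per process.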
@st.cache_resource
def load_pipeline():
    model_id = "tiiuae/falcon-7b-instruct"
    pipe = pipeline(
        "text-generation",
        model=AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True),
        tokenizer=AutoTokenizer.from_pretrained(model_id, trust_remote_code=True),
        device_map="auto",
    )
    return pipe
generator = load_pipeline()
if "history" not in st.session_state:
st.session_state.history = []
st.session_state.summary = ""
# -- UTILS --
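# Simple substring screen for crisis language; a heuristic, not a clinical risk classifier.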
TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
def is_high_risk(text):
    return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
def get_reply(prompt, max_new_tokens=150, temperature=0.7):
    # do_sample=True is required for temperature to have any effect.
    out = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature,
                    do_sample=True)[0]["generated_text"]
    # The pipeline echoes the prompt; keep only the text after the final "AI:" marker.
    return out.split("AI:")[-1].strip() if "AI:" in out else out.strip()
# -- STYLING --
st.markdown("""
<style>
body {
background-color: #111827;
color: #f3f4f6;
}
.stTextInput > div > div > input {
color: #f3f4f6;
}
</style>
""", unsafe_allow_html=True)
# -- HEADER --
st.title("🧠 TARS.help")
st.markdown("### A minimal AI that listens, reflects, and replies.")
st.markdown(f"🗓️ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
# -- USER INPUT --
user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
# -- MAIN FLOW --
if user_input:
    # Use the last two exchanges (four messages) as lightweight conversational context.
    context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
    with st.spinner("TARS is reflecting..."):
        time.sleep(1)  # brief pause so the spinner is visible
        if is_high_risk(user_input):
            response = "I'm really sorry you're feeling this way. You're not alone; please talk to someone you trust or a mental health professional. 💙"
        else:
            prompt = f"You are a kind and calm AI assistant.\n{context}\nUser: {user_input}\nAI:"
            response = get_reply(prompt, max_new_tokens=150)
    timestamp = datetime.now().strftime("%H:%M")
    st.session_state.history.append(("🧑 You", user_input, timestamp))
    st.session_state.history.append(("🤖 TARS", response, timestamp))
# -- CHAT DISPLAY --
st.markdown("## 🗨️ Session")
for speaker, msg, ts in st.session_state.history:  # "ts", not "time": don't shadow the time module
    st.markdown(f"**{speaker} [{ts}]:** {msg}")
# -- SUMMARY --
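# Summarize the full transcript; a lower temperature (0.5) keeps the note more focused.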
if st.button("🧾 Generate Session Summary"):
    convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
    prompt = f"Summarize this conversation in 3 reflective sentences:\n{convo}\nSummary:"
    try:
        summary = get_reply(prompt, max_new_tokens=200, temperature=0.5)
        st.session_state.summary = summary
    except Exception as e:
        st.error("Summary generation failed.")
        st.exception(e)
if st.session_state.summary:
    st.markdown("### 🧠 Session Note")
    st.markdown(st.session_state.summary)
    st.download_button("📥 Download Summary", st.session_state.summary, file_name="tars_session.txt")
# -- FOOTER --
st.markdown("---")
st.caption("TARS is not a therapist, but a quiet assistant that reflects with you.")