# TARS.help — minimal Streamlit reflective-chat app (Hugging Face Space).
# (The original upload carried Spaces page residue here: "Spaces: Sleeping".)
import os
import time
from datetime import datetime

import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# -- SETUP --
# Force the pure-Python protobuf implementation to sidestep binary-extension
# incompatibilities in the hosted environment.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
@st.cache_resource
def load_pipeline():
    """Load the Falcon-7B-Instruct text-generation pipeline once per process.

    Streamlit re-executes the whole script on every widget interaction;
    without ``st.cache_resource`` the multi-gigabyte model would be
    re-downloaded/re-loaded on each rerun.

    Returns:
        A transformers text-generation pipeline placed via ``device_map="auto"``.
    """
    model_id = "tiiuae/falcon-7b-instruct"
    return pipeline(
        "text-generation",
        model=AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True),
        tokenizer=AutoTokenizer.from_pretrained(model_id, trust_remote_code=True),
        device_map="auto",
    )
generator = load_pipeline()

# Initialise per-session state. Each key is guarded independently so that
# one key being cleared (or added in a later version) cannot leave the
# other uninitialised — the original only set `summary` under the
# `history` check.
if "history" not in st.session_state:
    st.session_state.history = []  # list of (speaker, message, timestamp)
if "summary" not in st.session_state:
    st.session_state.summary = ""
# -- UTILS --
# Crisis-language keywords that trigger the hard-coded safety response
# instead of a model-generated reply.
TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]


def is_high_risk(text):
    """Return True when *text* contains any crisis trigger phrase (case-insensitive)."""
    lowered = text.lower()
    for phrase in TRIGGER_PHRASES:
        if phrase in lowered:
            return True
    return False
def get_reply(prompt, max_new_tokens=150, temperature=0.7, gen=None):
    """Generate a reply for *prompt* and strip it down to the AI's turn.

    Args:
        prompt: Full prompt text (persona + context + "AI:" suffix).
        max_new_tokens: Generation length cap passed to the pipeline.
        temperature: Sampling temperature passed to the pipeline.
        gen: Optional text-generation callable; defaults to the module-level
            ``generator``. Backward-compatible injection point that removes
            the hard dependency on the global (and makes the function
            testable without loading the model).

    Returns:
        The text after the last "AI:" marker (the generated text echoes the
        prompt, so everything before it is discarded), or the whole stripped
        output when no marker is present.
    """
    pipe = generator if gen is None else gen
    out = pipe(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]["generated_text"]
    return out.split("AI:")[-1].strip() if "AI:" in out else out.strip()
# -- STYLING --
# Dark theme tweaks injected as raw HTML. The extraction artifacts (" | |")
# that had leaked into this string literal are removed — they would have
# been emitted verbatim into the page's <style> block.
st.markdown(
    """
    <style>
    body {
        background-color: #111827;
        color: #f3f4f6;
    }
    .stTextInput > div > div > input {
        color: #f3f4f6;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
# -- HEADER --
# NOTE(review): emoji throughout this app were mojibake in the source
# (UTF-8 bytes decoded as ISO-8859-7, e.g. "π§" for 🧠); restored to the
# most plausible originals — confirm against the deployed app.
st.title("🧠 TARS.help")
st.markdown("### A minimal AI that listens, reflects, and replies.")
st.markdown(
    f"🗓️ {datetime.now().strftime('%B %d, %Y')} | "
    f"{len(st.session_state.history) // 2} exchanges"
)

# -- USER INPUT --
user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
# -- MAIN FLOW --
# Guard against Streamlit reruns: st.text_input keeps its value across
# reruns, so without the `last_input` check the identical exchange would be
# re-appended to history on every subsequent interaction (e.g. a button
# click elsewhere on the page).
if user_input and st.session_state.get("last_input") != user_input:
    st.session_state.last_input = user_input
    # Short rolling context: last 2 exchanges (4 entries) only.
    context = "\n".join(f"{s}: {m}" for s, m, _ in st.session_state.history[-4:])
    with st.spinner("TARS is reflecting..."):
        time.sleep(1)  # brief pause so the spinner is visible
        if is_high_risk(user_input):
            # Hard-coded safety response — never route crisis language
            # through the model.
            response = (
                "I'm really sorry you're feeling this way. You're not alone — "
                "please talk to someone you trust or a mental health professional. 💙"
            )
        else:
            prompt = f"You are a kind and calm AI assistant.\n{context}\nUser: {user_input}\nAI:"
            response = get_reply(prompt, max_new_tokens=150)
    timestamp = datetime.now().strftime("%H:%M")
    st.session_state.history.append(("🧑 You", user_input, timestamp))
    st.session_state.history.append(("🤖 TARS", response, timestamp))
# -- CHAT DISPLAY --
st.markdown("## 🗨️ Session")
# `ts`, not `time`: the original loop variable shadowed the imported `time`
# module at module scope.
for speaker, msg, ts in st.session_state.history:
    st.markdown(f"**{speaker} [{ts}]:** {msg}")
# -- SUMMARY --
if st.button("🧾 Generate Session Summary"):
    if not st.session_state.history:
        # Nothing to summarise: avoid sending an empty conversation to the
        # model (the original would prompt on a blank transcript).
        st.warning("Nothing to summarise yet — start a conversation first.")
    else:
        convo = "\n".join(f"{s}: {m}" for s, m, _ in st.session_state.history)
        prompt = f"Summarize this conversation in 3 reflective sentences:\n{convo}\nSummary:"
        try:
            st.session_state.summary = get_reply(prompt, max_new_tokens=200, temperature=0.5)
        except Exception as e:
            # Surface generation failures in the UI rather than crashing the app.
            st.error("Summary generation failed.")
            st.exception(e)

if st.session_state.summary:
    st.markdown("### 🧠 Session Note")
    st.markdown(st.session_state.summary)
    st.download_button("📥 Download Summary", st.session_state.summary, file_name="tars_session.txt")
# -- FOOTER --
st.markdown("---")
st.caption("TARS is not a therapist, but a quiet assistant that reflects with you.")