Spaces:
Sleeping
Sleeping
File size: 3,051 Bytes
e988eb0 4e80759 9f2d402 4e80759 b5b9af8 e988eb0 b5b9af8 4e80759 9f2d402 0eefba5 4e80759 b5b9af8 95b2ec1 e988eb0 9f2d402 4e80759 9f2d402 4e80759 9f2d402 e988eb0 4e80759 e988eb0 95b2ec1 e988eb0 b5b9af8 95b2ec1 e988eb0 9f2d402 e988eb0 4e80759 e988eb0 4e80759 9f2d402 4e80759 9f2d402 4e80759 9f2d402 e988eb0 4e80759 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 |
import streamlit as st
from datetime import datetime
import time
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import os
# -- SETUP --
# Force the pure-Python protobuf implementation; avoids binary-extension
# incompatibilities when transformers loads tokenizer/model configs.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
@st.cache_resource
def load_model():
    """Load Falcon-7B-Instruct once per process and return a text-generation pipeline.

    Cached via st.cache_resource so Streamlit reruns reuse the same model
    instead of reloading the weights on every interaction.
    """
    repo_id = "tiiuae/falcon-7b-instruct"
    tok = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
    lm = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
    return pipeline("text-generation", model=lm, tokenizer=tok)
generator = load_model()

# Initialise per-session state exactly once: an empty chat transcript and a
# blank session summary. (Both keys are created together on first run.)
if "history" not in st.session_state:
    st.session_state["history"] = []
    st.session_state["summary"] = ""
# Crisis-indicator phrases; matching is case-insensitive via lower-casing.
TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]


def is_high_risk(text):
    """Return True if *text* contains any crisis trigger phrase (case-insensitive)."""
    lowered = text.lower()
    for phrase in TRIGGER_PHRASES:
        if phrase in lowered:
            return True
    return False
def get_response(prompt, max_new_tokens=150, temperature=0.7):
    """Run the generator on *prompt* and return only the text after the last 'AI:' marker.

    If the model output contains no 'AI:' marker, the whole stripped output
    is returned instead.
    """
    result = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
    text = result[0]["generated_text"]
    if "AI:" in text:
        return text.split("AI:")[-1].strip()
    return text.strip()
# Page header and the single free-text input.
# NOTE(review): the "π§"/"ποΈ" literals look like mojibake'd emoji from an
# encoding round-trip; preserved byte-for-byte — confirm intended glyphs.
st.title("π§ TARS.help")
st.markdown("### A quiet AI that reflects and replies.")
today_label = datetime.now().strftime('%B %d, %Y')
exchange_count = len(st.session_state.history) // 2
st.markdown(f"ποΈ {today_label} | {exchange_count} exchanges")
user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
if user_input:
    # Fold the most recent four turns (two exchanges) into the prompt context.
    recent = st.session_state.history[-4:]
    context = "\n".join(f"{who}: {what}" for who, what, _ in recent)
    with st.spinner("TARS is reflecting..."):
        time.sleep(1)
        if is_high_risk(user_input):
            # Crisis path: canned safety message, no model call.
            response = "I'm really sorry you're feeling this way. You're not alone β please talk to someone you trust or a mental health professional. π"
        else:
            prompt = f"You are a calm, empathetic AI assistant.\n{context}\nUser: {user_input}\nAI:"
            response = get_response(prompt)
    # Record both sides of the exchange with a shared wall-clock timestamp.
    timestamp = datetime.now().strftime("%H:%M")
    st.session_state.history.append(("π§ You", user_input, timestamp))
    st.session_state.history.append(("π€ TARS", response, timestamp))
st.markdown("## π¨οΈ Session")
# FIX: the loop variable was named `time`, shadowing the imported `time`
# module at module scope; any code running after this loop that called
# `time.sleep` would hit a string instead of the module. Renamed to `ts`.
for speaker, msg, ts in st.session_state.history:
    st.markdown(f"**{speaker} [{ts}]:** {msg}")
if st.button("π§Ύ Generate Session Summary"):
    # Serialize the full transcript (speaker: message per line) for the model.
    transcript = "\n".join(f"{who}: {what}" for who, what, _ in st.session_state.history)
    prompt = f"Summarize the emotional tone and themes in this conversation:\n{transcript}\nSummary:"
    try:
        st.session_state.summary = get_response(prompt, max_new_tokens=200, temperature=0.5)
    except Exception as e:
        # Surface the failure in the UI rather than crashing the rerun.
        st.error("Summary generation failed.")
        st.exception(e)
# Render the session note (if one has been generated) with a download option.
summary_text = st.session_state.summary
if summary_text:
    st.markdown("### π§ Session Note")
    st.markdown(summary_text)
    st.download_button("π₯ Download Summary", summary_text, file_name="tars_session.txt")
st.markdown("---")
# Persistent safety disclaimer rendered at the bottom of every rerun.
st.caption("TARS is not a therapist. If you're in crisis, please seek help from a professional.")
|