Divymakesml committed on
Commit 4798805 · verified · 1 Parent(s): e988eb0

Update app.py

Files changed (1)
  1. app.py +26 -13
app.py CHANGED
@@ -2,17 +2,22 @@ import os
 import time
 from datetime import datetime
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # -- SETUP --
 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
 
 @st.cache_resource
 def load_respondent():
-    model_id = "google/flan-t5-small"
+    model_id = "mistralai/Mistral-7B-Instruct-v0.1"
     tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
-    return pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        device_map="auto",
+        trust_remote_code=True,
+        torch_dtype="auto"
+    )
+    return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 generator = load_respondent()
 
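Note on the new load path: Mistral-7B-Instruct-v0.1 is supported natively by recent transformers releases, so trust_remote_code=True should not be needed here, and device_map="auto" requires the accelerate package to be installed. The fp16 weights alone are roughly 14 GB, which exceeds most free Spaces hardware; a quantized load is a common alternative. A minimal sketch, assuming bitsandbytes is available on a CUDA machine (not part of this commit):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

    model_id = "mistralai/Mistral-7B-Instruct-v0.1"
    # 4-bit NF4 quantization cuts weight memory to roughly a quarter of fp16.
    quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",                  # requires the accelerate package
        quantization_config=quant_config,
    )
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)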
@@ -48,21 +53,25 @@ def is_high_risk(text):
 user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
 
 # -- REPLY FUNCTION --
-def generate_reply(context):
-    prompt = f"Respond empathetically to this conversation:\n{context}"
-    result = generator(prompt, max_new_tokens=80, temperature=0.7)[0]["generated_text"]
-    return result.strip()
+def generate_reply(user_input, context):
+    prompt = f"""You are a kind and empathetic AI assistant. Respond thoughtfully based on the following conversation:
+
+{context}
+User: {user_input}
+AI:"""
+    response = generator(prompt, max_new_tokens=150, temperature=0.7)[0]['generated_text']
+    return response.split("AI:")[-1].strip()
 
 # -- CONVERSATION FLOW --
 if user_input:
     context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
     with st.spinner("TARS is reflecting..."):
-        time.sleep(0.5)
+        time.sleep(1.2)
         if is_high_risk(user_input):
             response = "I'm really sorry you're feeling this way. You're not alone — please talk to someone you trust or a mental health professional. 💙"
         else:
             full_context = context + f"\nUser: {user_input}"
-            response = generate_reply(full_context)
+            response = generate_reply(user_input, context)
     timestamp = datetime.now().strftime("%H:%M")
     st.session_state.history.append(("🧍 You", user_input, timestamp))
     st.session_state.history.append(("🤖 TARS", response, timestamp))
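Two things worth flagging in this hunk: full_context is built but generate_reply(user_input, context) is called with the plain context, so the assignment is dead code; and a causal text-generation pipeline returns the prompt plus the continuation, which is why the split("AI:") stripping is needed at all. A more robust variant, sketched under the assumption that the standard return_full_text argument of transformers' TextGenerationPipeline is available:

    def generate_reply(user_input, context):
        prompt = (
            "You are a kind and empathetic AI assistant. "
            "Respond thoughtfully based on the following conversation:\n\n"
            f"{context}\nUser: {user_input}\nAI:"
        )
        # return_full_text=False drops the echoed prompt, so no split("AI:") is needed;
        # do_sample=True is required for temperature to actually take effect.
        out = generator(prompt, max_new_tokens=150, temperature=0.7,
                        do_sample=True, return_full_text=False)[0]["generated_text"]
        # The model may keep role-playing extra turns; cut at the first "User:".
        return out.split("\nUser:")[0].strip()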
@@ -75,10 +84,14 @@ for speaker, msg, time in st.session_state.history:
 # -- SESSION SUMMARY --
 if st.button("🧾 Generate Session Summary"):
     convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
-    prompt = f"Summarize this conversation in 2-3 sentences:\n{convo}"
+    prompt = f"""Summarize the emotional tone and key themes from this conversation in 3 sentences:
+
+{convo}
+
+Summary:"""
     try:
-        output = generator(prompt, max_new_tokens=120, temperature=0.5)[0]['generated_text']
-        st.session_state.summary = output.strip()
+        output = generator(prompt, max_new_tokens=200, temperature=0.5)[0]['generated_text']
+        st.session_state.summary = output.split("Summary:")[-1].strip()
     except Exception as e:
         st.error("❌ Summary generation failed.")
         st.exception(e)
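The same echo caveat applies here: output contains the prompt, and split("Summary:")[-1] relies on the model not repeating the word "Summary:" in its answer. Note also that the Instruct variants of Mistral are fine-tuned on an [INST]-wrapped chat format, so raw completion-style prompts like these tend to underperform. A sketch of the tokenizer-driven way to build such prompts, assuming a transformers version new enough to ship chat templates; `convo` and `generator` are the names from the app code above:

    messages = [{
        "role": "user",
        "content": f"Summarize the emotional tone and key themes from this conversation in 3 sentences:\n\n{convo}",
    }]
    # apply_chat_template wraps the request in the [INST] ... [/INST] markers
    # the Instruct checkpoint was trained on; add_generation_prompt leaves the
    # prompt open for the assistant's reply.
    prompt = generator.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )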
 