Divymakesml committed on
Commit
fa39eb6
·
verified ·
1 Parent(s): 4e80759

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -9,9 +9,9 @@ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
9
 
10
  @st.cache_resource
11
  def load_model():
12
- model_id = "tiiuae/falcon-7b-instruct"
13
- tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
14
- model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
15
  return pipeline("text-generation", model=model, tokenizer=tokenizer)
16
 
17
  generator = load_model()
@@ -25,9 +25,9 @@ TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "
25
  def is_high_risk(text):
26
  return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
27
 
28
- def get_response(prompt, max_new_tokens=150, temperature=0.7):
29
  output = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]["generated_text"]
30
- return output.split("AI:")[-1].strip() if "AI:" in output else output.strip()
31
 
32
  st.title("🧠 TARS.help")
33
  st.markdown("### A quiet AI that reflects and replies.")
@@ -56,7 +56,7 @@ if st.button("🧾 Generate Session Summary"):
56
  convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
57
  prompt = f"Summarize the emotional tone and themes in this conversation:\n{convo}\nSummary:"
58
  try:
59
- summary = get_response(prompt, max_new_tokens=200, temperature=0.5)
60
  st.session_state.summary = summary
61
  except Exception as e:
62
  st.error("Summary generation failed.")
 
9
 
10
  @st.cache_resource
11
  def load_model():
12
+ model_id = "sshleifer/tiny-gpt2"
13
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
14
+ model = AutoModelForCausalLM.from_pretrained(model_id)
15
  return pipeline("text-generation", model=model, tokenizer=tokenizer)
16
 
17
  generator = load_model()
 
25
  def is_high_risk(text):
26
  return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
27
 
28
+ def get_response(prompt, max_new_tokens=100, temperature=0.7):
29
  output = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]["generated_text"]
30
+ return output.strip()
31
 
32
  st.title("🧠 TARS.help")
33
  st.markdown("### A quiet AI that reflects and replies.")
 
56
  convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
57
  prompt = f"Summarize the emotional tone and themes in this conversation:\n{convo}\nSummary:"
58
  try:
59
+ summary = get_response(prompt, max_new_tokens=100, temperature=0.5)
60
  st.session_state.summary = summary
61
  except Exception as e:
62
  st.error("Summary generation failed.")