Update app.py
app.py CHANGED
@@ -30,9 +30,17 @@ def load_sentiment_pipeline():
 def load_keybert_model():
     return KeyBERT(model="all-MiniLM-L6-v2")
 
-# ─── BlenderBot Response …
+# ─── BlenderBot Response Components ─────────────────────────────────────────
 @st.cache_resource
-def …
+def load_response_components():
+    # Load tokenizer and model directly for manual generation with truncation
+    tok = AutoTokenizer.from_pretrained(
+        "facebook/blenderbot-400M-distill",
+        use_fast=True
+    )
+    mdl = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
+    return tok, mdl
+…:
     # Use BlenderBot 400M Distill for text generation
     return pipeline(
         "text2text-generation",
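In the old version the cached loader returned a `text2text-generation` pipeline, which tokenizes internally; this hunk swaps it for a directly loaded tokenizer/model pair so the tokenization step, and in particular its truncation, is under the app's control. A minimal sketch of loading the same checkpoint outside Streamlit, assuming only that `transformers` is installed (the `MODEL_ID` constant is ours, not from the commit):

# Minimal sketch, not the app's exact code: load the same checkpoint and
# inspect the input-length limit that motivates manual truncation.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_ID = "facebook/blenderbot-400M-distill"  # same checkpoint as app.py

tok = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=True)
mdl = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

# The tokenizer records the longest input the model's learned position
# embeddings can handle; anything longer must be truncated before generate().
print(tok.model_max_length)  # typically 128 for BlenderBot checkpoints

In the app itself, `@st.cache_resource` keeps the returned pair alive across Streamlit reruns, so the ~400M-parameter model is loaded once per process rather than on every widget interaction.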
@@ -100,22 +108,33 @@ def main():
     st.markdown(f"**Highest Sentiment:** **{max_label}** ({max_score:.4f})")
     # ────────────────────────────────────────────────────────────────────
 
-    # Generate appropriate reply
-    …
+    # Generate appropriate reply using manual tokenization & generation
+    tok, mdl = load_response_components()
     if max_label in ["Positive", "Very Positive"]:
-        …
+        prompt_text = (
             f"You are a friendly customer success representative. The customer said: \"{review}\". "
             "Write two sentences to express gratitude and highlight their positive experience."
         )
     else:
-        …
+        prompt_text = (
             f"You are a helpful customer support specialist. The customer said: \"{review}\". "
             f"Identified issues: {', '.join([kw for kw, _ in keywords])}. "
             "First, ask 1-2 clarifying questions to understand their situation. "
             "Then provide two concrete suggestions or next steps to address these issues."
         )
-    …
-    …
+    # Tokenize with truncation to avoid out-of-range embeddings
+    inputs = tok(
+        prompt_text,
+        return_tensors="pt",
+        truncation=True,
+        max_length=tok.model_max_length
+    )
+    outputs = mdl.generate(
+        **inputs,
+        max_new_tokens=150,
+        do_sample=False
+    )
+    reply = tok.decode(outputs[0], skip_special_tokens=True).strip()
 
     st.subheader("Generated Reply")
     st.write(reply)
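Pulled out of Streamlit, the new reply path looks roughly like this. It is a sketch under the commit's stated assumptions, not the app's exact code; `generate_reply` and the sample review are hypothetical names and inputs:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_ID = "facebook/blenderbot-400M-distill"
tok = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=True)
mdl = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

def generate_reply(prompt_text: str) -> str:
    # Truncate so every position id stays inside the embedding table
    inputs = tok(
        prompt_text,
        return_tensors="pt",
        truncation=True,
        max_length=tok.model_max_length,
    )
    # do_sample=False means greedy decoding, so the same review always
    # produces the same reply; max_new_tokens bounds the reply length
    outputs = mdl.generate(**inputs, max_new_tokens=150, do_sample=False)
    return tok.decode(outputs[0], skip_special_tokens=True).strip()

review = "The keyboard feels great but the battery dies in two hours."
print(generate_reply(
    f'You are a helpful customer support specialist. The customer said: "{review}". '
    "First, ask 1-2 clarifying questions to understand their situation."
))

One behavioral note: truncation keeps the first `model_max_length` tokens and drops the rest, so for very long reviews it is the trailing instruction text that gets cut; shortening the review before building the prompt would preserve the instructions.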
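For reference, the over-length condition the truncation guards against can be reproduced with the tokenizer alone. This is a sketch; the 500-word string is an arbitrary stand-in for a long pasted review:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

long_review = "word " * 500  # far beyond the model's input limit

untruncated = tok(long_review, return_tensors="pt")
truncated = tok(
    long_review,
    return_tensors="pt",
    truncation=True,
    max_length=tok.model_max_length,
)

# The untruncated encoding keeps every token; feeding it to the model
# would index past the position-embedding table. The truncated encoding
# is capped at tok.model_max_length and generates safely.
print(untruncated["input_ids"].shape[1], truncated["input_ids"].shape[1])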
|