Update app.py
app.py
CHANGED
@@ -32,13 +32,15 @@ def load_keybert_model():
 
 # ─── FLAN-T5 Generation Pipeline ────────────────────────────────────────────
 @st.cache_resource
-def load_flant5_pipeline():
+def load_response_pipeline():
     seq_tok = AutoTokenizer.from_pretrained("google/flan-t5-base")
     seq_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
     return pipeline(
         "text2text-generation",
         model=seq_model,
-        tokenizer=seq_tok
+        tokenizer=seq_tok,
+        max_new_tokens=150,
+        do_sample=False
     )
 
 LABEL_MAP = {
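Note on the new loader: the generation settings now live in the pipeline constructor rather than at each call site. In current transformers releases, keyword arguments supplied when the pipeline is built act as per-call generation defaults, so replies are capped at 150 new tokens and decoded greedily unless a caller overrides them. A minimal sketch of that behavior (the prompt text is made up):

gen = load_response_pipeline()
prompt = "Summarize: the order arrived two days late but intact."  # made-up example
print(gen(prompt)[0]["generated_text"])                      # defaults: greedy, up to 150 new tokens
print(gen(prompt, max_new_tokens=40)[0]["generated_text"])   # call-time kwargs still override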
@@ -60,30 +62,20 @@ def main():
         st.warning("Please enter a review to analyze.")
         return
 
-
-
-    # Load models
-    progress.text("Loading models...")
-    sentiment_pipeline = load_sentiment_pipeline()
-    kw_model = load_keybert_model()
-    generation_pipeline = load_flant5_pipeline()
-    progress.progress(20)
-
+    # ─── KEEP THIS BLOCK UNCHANGED ─────────────────────────────────────────
     # Sentiment Analysis
-
+    sentiment_pipeline = load_sentiment_pipeline()
     raw_scores = sentiment_pipeline(review)[0]
     sentiment_results = {LABEL_MAP[item['label']]: float(item['score']) for item in raw_scores}
-    progress.progress(40)
 
     # Keyword Extraction
-
+    kw_model = load_keybert_model()
     keywords = kw_model.extract_keywords(
         review,
         keyphrase_ngram_range=(1, 2),
         stop_words="english",
         top_n=3
     )
-    progress.progress(60)
 
     # Display Results
     col1, col2 = st.columns(2)
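main() now loads each model right where it is used instead of all up front behind a progress bar. The two loaders referenced in this hunk, load_sentiment_pipeline() and load_keybert_model(), are defined earlier in app.py and are not part of this diff. A rough sketch of what they presumably look like; the sentiment model name and its settings are assumptions, not taken from the file:

# Hypothetical loaders mirroring the cached-resource pattern above; the real
# definitions in app.py may differ. Imports shown for completeness.
import streamlit as st
from transformers import pipeline
from keybert import KeyBERT

@st.cache_resource
def load_sentiment_pipeline():
    # return_all_scores=True keeps the [{'label': ..., 'score': ...}, ...] shape
    # that main() unpacks via sentiment_pipeline(review)[0]
    return pipeline(
        "sentiment-analysis",
        model="nlptown/bert-base-multilingual-uncased-sentiment",  # assumed 5-class model
        return_all_scores=True,
    )

@st.cache_resource
def load_keybert_model():
    return KeyBERT()  # uses KeyBERT's default all-MiniLM-L6-v2 sentence-transformer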
@@ -96,7 +88,6 @@ def main():
         st.write(f"• {kw} ({score:.4f})")
 
     # Bar Chart
-    progress.text("Rendering chart...")
     df_scores = pd.DataFrame.from_dict(
         sentiment_results,
         orient='index',
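The line between orient='index' and the closing parenthesis (old line 103, new line 94) falls between hunks and is not shown; it carries the remaining DataFrame keyword arguments. A self-contained illustration of how the chart data is shaped, where the 'Score' column name and the five class labels are invented for the example:

# Illustrative only: the column name and the label set are assumptions.
import pandas as pd

sentiment_results = {"Very Negative": 0.02, "Negative": 0.05, "Neutral": 0.10,
                     "Positive": 0.33, "Very Positive": 0.50}
df_scores = pd.DataFrame.from_dict(sentiment_results, orient="index", columns=["Score"])
df_scores.index.name = "Sentiment"
print(df_scores)  # st.bar_chart(df_scores) draws one bar per sentiment class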
@@ -104,44 +95,27 @@ def main():
     )
     df_scores.index.name = 'Sentiment'
     st.bar_chart(df_scores)
-    progress.progress(80)
 
     # Highlight Highest Sentiment
     max_label, max_score = max(sentiment_results.items(), key=lambda x: x[1])
     st.markdown(f"**Highest Sentiment:** **{max_label}** ({max_score:.4f})")
+    # ────────────────────────────────────────────────────────────────────
 
-    # Generate
-
-    if max_label in ["
-        prompt = f"""
-You are a product quality and customer experience specialist at an e-commerce food retailer.
-
-Customer Review:
-"{review}"
-
-Instructions: Analyze the feedback and provide three distinct, actionable improvement recommendations. For each, include a concise title and a detailed explanation in 5–7 sentences, plus a bullet list of 3–5 execution steps and a measure of impact.
-
-Output only the three numbered recommendations (1–3), each with its title, detailed explanation, steps, and impact measure.
-"""
-        # Ensure longer outputs by specifying generation parameters
-        response = generation_pipeline(
-            prompt,
-            max_new_tokens=300,
-            min_length=200,
-            do_sample=True,
-            temperature=0.7,
-            top_p=0.9,
-            no_repeat_ngram_size=2
-        )
-        detailed = response[0]["generated_text"]
-        st.markdown(detailed)
+    # Generate appropriate reply
+    response_pipeline = load_response_pipeline()
+    if max_label in ["Positive", "Very Positive"]:
+        prompt = f"You are a friendly customer success representative. The customer said: \"{review}\". Write a warm, appreciative reply celebrating their positive experience."
     else:
-
-
-
-
-
+        prompt = (
+            f"You are a helpful customer support specialist. The customer said: \"{review}\". "
+            f"Key topics: {[kw for kw, _ in keywords]}. "
+            "Ask 1-2 clarifying questions to understand their concerns, then suggest specific improvements or next steps."
+        )
+    result = response_pipeline(prompt)
+    reply = result[0]['generated_text'].strip()
 
+    st.subheader("Generated Reply")
+    st.write(reply)
 
-    if __name__ ==
+if __name__ == '__main__':
     main()
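Taken together, the change swaps the long recommendation prompt for a short reply prompt and reuses the extracted keywords to ground the non-positive branch. A standalone sketch of the same flow outside Streamlit; the review, label, and keyword values are made-up inputs:

# Standalone sketch of the reply path; inputs are invented examples.
from transformers import pipeline

gen = pipeline("text2text-generation", model="google/flan-t5-base",
               max_new_tokens=150, do_sample=False)

review = "The granola arrived stale and the box was only half full."
max_label = "Negative"                        # pretend the sentiment step chose this
keywords = [("stale granola", 0.61), ("half full", 0.44)]

if max_label in ["Positive", "Very Positive"]:
    prompt = (f'You are a friendly customer success representative. The customer said: "{review}". '
              "Write a warm, appreciative reply celebrating their positive experience.")
else:
    prompt = (f'You are a helpful customer support specialist. The customer said: "{review}". '
              f"Key topics: {[kw for kw, _ in keywords]}. "
              "Ask 1-2 clarifying questions to understand their concerns, then suggest "
              "specific improvements or next steps.")

result = gen(prompt)                          # -> [{'generated_text': '...'}]
print(result[0]["generated_text"].strip())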