mayf committed · verified
Commit d26f340 · 1 Parent(s): a6c9351

Update app.py

Files changed (1)
  1. app.py +16 -14
app.py CHANGED
@@ -31,14 +31,15 @@ def load_keybert_model():
     return KeyBERT(model="all-MiniLM-L6-v2")
 
 # ─── BlenderBot Response Pipeline ───────────────────────────────────────────
-from transformers import Conversation
-
 @st.cache_resource
 def load_response_pipeline():
-    # High-level conversational helper using BlenderBot 400M Distill
+    # Use BlenderBot 400M Distill for text generation
     return pipeline(
-        "conversational",
-        model="facebook/blenderbot-400M-distill"
+        "text2text-generation",
+        model="facebook/blenderbot-400M-distill",
+        tokenizer="facebook/blenderbot-400M-distill",
+        max_new_tokens=150,
+        do_sample=False
     )
 
 LABEL_MAP = {
@@ -102,17 +103,19 @@ def main():
     # Generate appropriate reply
     response_pipeline = load_response_pipeline()
     if max_label in ["Positive", "Very Positive"]:
-        user_input = f"The customer said: \"{review}\". Write a warm, appreciative two-sentence reply celebrating their positive experience."
+        prompt = (
+            f"You are a friendly customer success representative. The customer said: \"{review}\". "
+            "Write two sentences to express gratitude and highlight their positive experience."
+        )
     else:
-        user_input = (
-            f"The customer said: \"{review}\". Identified issues: {', '.join([kw for kw, _ in keywords])}. "
-            "First, ask 1-2 clarifying questions to better understand their situation. "
+        prompt = (
+            f"You are a helpful customer support specialist. The customer said: \"{review}\". "
+            f"Identified issues: {', '.join([kw for kw, _ in keywords])}. "
+            "First, ask 1-2 clarifying questions to understand their situation. "
             "Then provide two concrete suggestions or next steps to address these issues."
         )
-    conv = Conversation(user_input)
-    response = response_pipeline(conv)
-    # Grab the latest generated response
-    reply = response.generated_responses[-1]
+    result = response_pipeline(prompt)
+    reply = result[0]['generated_text'].strip()
 
     st.subheader("Generated Reply")
     st.write(reply)
@@ -120,4 +123,3 @@ def main():
 
 if __name__ == '__main__':
     main()
-
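
For reference, below is a minimal standalone sketch of the pattern this commit switches to, outside Streamlit. It assumes only what the diff shows: a text2text-generation pipeline wrapping facebook/blenderbot-400M-distill that returns a list of dicts with a generated_text key. The review string is invented purely to illustrate the negative-path prompt shape used in app.py.

from transformers import pipeline

# Same task/model pairing as the updated load_response_pipeline()
generator = pipeline(
    "text2text-generation",
    model="facebook/blenderbot-400M-distill",
    tokenizer="facebook/blenderbot-400M-distill",
    max_new_tokens=150,
    do_sample=False,
)

# Hypothetical review, used only to exercise the prompt template from app.py
review = "Shipping was slow and the box arrived damaged."
prompt = (
    f'You are a helpful customer support specialist. The customer said: "{review}". '
    "First, ask 1-2 clarifying questions to understand their situation. "
    "Then provide two concrete suggestions or next steps to address these issues."
)

result = generator(prompt)
reply = result[0]["generated_text"].strip()  # pipeline output: [{"generated_text": ...}]
print(reply)

With do_sample=False decoding is deterministic, so the same review produces the same reply on every run, which keeps the generated reply stable across Streamlit reruns.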