mayf commited on
Commit
ef53265
·
verified ·
1 Parent(s): 9359b84

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -20
app.py CHANGED
@@ -6,23 +6,12 @@ from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassifica
6
  from keybert import KeyBERT
7
 
8
  # ─── DeepSeek Model Client ────────────────────────────────────────────────────
9
- # Option 1: High-level helper pipeline for chat-style generation
10
- chat = pipeline(
11
- "chat",
12
  model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
13
  trust_remote_code=True
14
  )
15
-
16
- # Option 2: Direct model & tokenizer instantiation (alternative)
17
- # tokenizer_ds = AutoTokenizer.from_pretrained(
18
- # "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
19
- # trust_remote_code=True
20
- # )
21
- # model_ds = AutoModelForCausalLM.from_pretrained(
22
- # "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
23
- # trust_remote_code=True
24
- # )
25
-
26
  @st.cache_resource
27
  def load_sentiment_pipeline():
28
  model_name = "mayf/amazon_reviews_bert_ft"
@@ -118,15 +107,17 @@ Tasks:
118
  2. Recommendations: Three separate paragraphs with actionable suggestions (max 30 words each).
119
  """
120
 
121
- # Use the high-level pipeline for generation
122
- chat_input = [
123
- {"role": "system", "content": "You are a product-feedback analyst."},
124
- {"role": "user", "content": prompt}
125
- ]
126
- gen_output = pipe(chat_input)
127
  gpt_reply = gen_output[0]['generated_text']
128
 
129
  # Alternative: direct model invocation
 
 
 
130
  # inputs = tokenizer_ds(prompt, return_tensors="pt")
131
  # outputs = model_ds.generate(**inputs, max_new_tokens=200)
132
  # gpt_reply = tokenizer_ds.decode(outputs[0], skip_special_tokens=True)
 
6
  from keybert import KeyBERT
7
 
8
  # ─── DeepSeek Model Client ────────────────────────────────────────────────────
9
+ # High-level helper pipeline for text-generation (flattened chat messages)
10
+ pipe = pipeline(
11
+ "text-generation",
12
  model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
13
  trust_remote_code=True
14
  )
 
 
 
 
 
 
 
 
 
 
 
15
  @st.cache_resource
16
  def load_sentiment_pipeline():
17
  model_name = "mayf/amazon_reviews_bert_ft"
 
107
  2. Recommendations: Three separate paragraphs with actionable suggestions (max 30 words each).
108
  """
109
 
110
+ # Use the high-level text-generation pipeline with flattened prompt
111
+ # Flatten roles into a single text prompt
112
+ flat_prompt = "\n".join(
113
+     f"{msg['role'].upper()}: {msg['content']}" for msg in chat_input)
114
+ gen_output = pipe(flat_prompt, max_new_tokens=200)
 
115
  gpt_reply = gen_output[0]['generated_text']
116
 
117
  # Alternative: direct model invocation
118
+ # inputs = tokenizer_ds(flat_prompt, return_tensors="pt")
119
+ # outputs = model_ds.generate(**inputs, max_new_tokens=200)
120
+ # gpt_reply = tokenizer_ds.decode(outputs[0], skip_special_tokens=True)
121
  # inputs = tokenizer_ds(prompt, return_tensors="pt")
122
  # outputs = model_ds.generate(**inputs, max_new_tokens=200)
123
  # gpt_reply = tokenizer_ds.decode(outputs[0], skip_special_tokens=True)