siddhartharya committed on
Commit
85352fd
1 Parent(s): 2303217

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -10
app.py CHANGED
@@ -173,27 +173,30 @@ def generate_summary(bookmark):
173
  if main_content:
174
  available_content.append(f"Main Content: {main_content}")
175
 
 
 
176
  # Construct the prompt
177
  prompt = f"""
178
- Analyze and summarize the following webpage content:
179
 
180
- {' '.join(available_content)}
181
 
182
- If the content seems insufficient or outdated, please use any additional knowledge you have about the current state of the website to provide an accurate summary.
183
 
184
  Provide a concise summary (2-3 sentences) focusing on:
185
  - The main purpose or topic of the page.
186
  - Key information or features.
187
  - Target audience or use case (if apparent).
188
 
 
 
189
  Be factual and objective.
190
  """
191
 
192
  # Call the LLM via Groq Cloud API
193
  response = openai.ChatCompletion.create(
194
- model='llama-3.1-70b-versatile', # Updated model
195
  messages=[
196
- {"role": "system", "content": "You are a helpful assistant that creates concise webpage summaries."},
197
  {"role": "user", "content": prompt}
198
  ],
199
  max_tokens=200,
@@ -201,13 +204,48 @@ Be factual and objective.
201
  )
202
 
203
  summary = response['choices'][0]['message']['content'].strip()
 
 
204
  logger.info("Successfully generated LLM summary")
205
  bookmark['summary'] = summary
206
  return bookmark
207
 
208
  except Exception as e:
209
  logger.error(f"Error generating summary: {e}", exc_info=True)
210
- bookmark['summary'] = 'No summary available.'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  return bookmark
212
 
213
  def parse_bookmarks(file_content):
@@ -330,9 +368,8 @@ Respond with only the category name.
330
 
331
  try:
332
  response = openai.ChatCompletion.create(
333
- model='llama-3.1-70b-versatile', # Updated model
334
  messages=[
335
- {"role": "system", "content": "You categorize webpages based on their content."},
336
  {"role": "user", "content": prompt}
337
  ],
338
  max_tokens=10,
@@ -608,9 +645,8 @@ Provide a concise and helpful response.
608
  """
609
 
610
  response = openai.ChatCompletion.create(
611
- model='llama-3.1-70b-versatile', # Updated model
612
  messages=[
613
- {"role": "system", "content": "You assist users by finding relevant information from their bookmarks."},
614
  {"role": "user", "content": prompt}
615
  ],
616
  max_tokens=500,
 
173
  if main_content:
174
  available_content.append(f"Main Content: {main_content}")
175
 
176
+ content_text = ' '.join(available_content)
177
+
178
  # Construct the prompt
179
  prompt = f"""
180
+ You are a helpful assistant that creates concise webpage summaries.
181
 
182
+ Analyze the following webpage content:
183
 
184
+ {content_text}
185
 
186
  Provide a concise summary (2-3 sentences) focusing on:
187
  - The main purpose or topic of the page.
188
  - Key information or features.
189
  - Target audience or use case (if apparent).
190
 
191
+ If the content is insufficient, use your prior knowledge about the website.
192
+
193
  Be factual and objective.
194
  """
195
 
196
  # Call the LLM via Groq Cloud API
197
  response = openai.ChatCompletion.create(
198
+ model='llama3-8b-8192', # Reverted back to the previous model
199
  messages=[
 
200
  {"role": "user", "content": prompt}
201
  ],
202
  max_tokens=200,
 
204
  )
205
 
206
  summary = response['choices'][0]['message']['content'].strip()
207
+ if not summary:
208
+ raise ValueError("Empty summary received from the model.")
209
  logger.info("Successfully generated LLM summary")
210
  bookmark['summary'] = summary
211
  return bookmark
212
 
213
  except Exception as e:
214
  logger.error(f"Error generating summary: {e}", exc_info=True)
215
+ # Fallback to prior knowledge
216
+ try:
217
+ prompt = f"""
218
+ You are a knowledgeable assistant.
219
+
220
+ The user provided a URL: {bookmark.get('url')}
221
+
222
+ Provide a concise summary (2-3 sentences) about this website based on your knowledge.
223
+
224
+ Focus on:
225
+ - The main purpose or topic of the website.
226
+ - Key information or features.
227
+ - Target audience or use case (if apparent).
228
+
229
+ Be factual and objective.
230
+ """
231
+
232
+ response = openai.ChatCompletion.create(
233
+ model='llama3-8b-8192', # Reverted back to the previous model
234
+ messages=[
235
+ {"role": "user", "content": prompt}
236
+ ],
237
+ max_tokens=200,
238
+ temperature=0.5,
239
+ )
240
+
241
+ summary = response['choices'][0]['message']['content'].strip()
242
+ if not summary:
243
+ raise ValueError("Empty summary received from the model.")
244
+ logger.info("Successfully generated LLM summary using prior knowledge")
245
+ bookmark['summary'] = summary
246
+ except Exception as inner_e:
247
+ logger.error(f"Error generating summary using prior knowledge: {inner_e}", exc_info=True)
248
+ bookmark['summary'] = 'No summary available.'
249
  return bookmark
250
 
251
  def parse_bookmarks(file_content):
 
368
 
369
  try:
370
  response = openai.ChatCompletion.create(
371
+ model='llama3-8b-8192', # Reverted back to the previous model
372
  messages=[
 
373
  {"role": "user", "content": prompt}
374
  ],
375
  max_tokens=10,
 
645
  """
646
 
647
  response = openai.ChatCompletion.create(
648
+ model='llama3-8b-8192', # Reverted back to the previous model
649
  messages=[
 
650
  {"role": "user", "content": prompt}
651
  ],
652
  max_tokens=500,