# app.py
import gradio as gr
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import asyncio
import aiohttp
import re
import base64
import logging
import os
import sys
from html import escape

# Import OpenAI library
import openai
# Set up logging to output to the console
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Create a console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)

# Create a formatter and set it for the handler
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
console_handler.setFormatter(formatter)

# Add the handler to the logger
logger.addHandler(console_handler)
# Initialize models and variables
logger.info("Initializing models and variables")
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
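# all-MiniLM-L6-v2 emits 384-dimensional sentence embeddings; the FAISS index
# dimension below is taken from the encoder output at build time.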
faiss_index = None
bookmarks = []
fetch_cache = {}
# Define the categories
CATEGORIES = [
    "Social Media",
    "News and Media",
    "Education and Learning",
    "Entertainment",
    "Shopping and E-commerce",
    "Finance and Banking",
    "Technology",
    "Health and Fitness",
    "Travel and Tourism",
    "Food and Recipes",
    "Sports",
    "Arts and Culture",
    "Government and Politics",
    "Business and Economy",
    "Science and Research",
    "Personal Blogs and Journals",
    "Job Search and Careers",
    "Music and Audio",
    "Videos and Movies",
    "Reference and Knowledge Bases",
    "Dead Link",
    "Uncategorized",
]
# Set up Groq Cloud API key and base URL
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY environment variable not set.")

# Set OpenAI API key and base URL to use the Groq Cloud API
openai.api_key = GROQ_API_KEY
openai.api_base = "https://api.groq.com/openai/v1"
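# Note: openai.api_key / openai.api_base and ChatCompletion.create are the
# legacy openai-python (<1.0) interface; Groq exposes an OpenAI-compatible
# endpoint, so the older client works when pointed at it.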
def extract_main_content(soup):
    """
    Extract the main content from a webpage while filtering out boilerplate content.
    """
    if not soup:
        return ""

    # Remove script, style, and common boilerplate elements
    for element in soup(['script', 'style', 'header', 'footer', 'nav', 'aside']):
        element.decompose()

    # First try to find content in main content areas.
    # Note: find_all() matches tag names only, so CSS-style selectors such as
    # 'div.content' have to go through select() instead.
    main_content_tags = soup.select('article, main, div.content, div.post, div.entry-content')
    if main_content_tags:
        content = ' '.join(tag.get_text(separator=' ', strip=True) for tag in main_content_tags)
    else:
        # Try to find content in <p> tags
        p_tags = soup.find_all('p')
        if p_tags:
            content = ' '.join(p.get_text(separator=' ', strip=True) for p in p_tags)
        else:
            # Fall back to the whole body
            content = soup.body.get_text(separator=' ', strip=True) if soup.body else soup.get_text(separator=' ', strip=True)

    # Collapse newlines, tabs, and runs of spaces into single spaces
    content = re.sub(r'\s+', ' ', content).strip()
    return content
def get_page_metadata(soup):
    """
    Extract metadata from the webpage, including title, description, and keywords.
    """
    metadata = {
        'title': '',
        'description': '',
        'keywords': ''
    }
    if not soup:
        return metadata

    # Get title
    title_tag = soup.find('title')
    if title_tag and title_tag.string:
        metadata['title'] = title_tag.string.strip()

    # Get meta description (try multiple variants)
    meta_desc = (
        soup.find('meta', attrs={'name': 'description'}) or
        soup.find('meta', attrs={'property': 'og:description'}) or
        soup.find('meta', attrs={'name': 'twitter:description'})
    )
    if meta_desc:
        metadata['description'] = meta_desc.get('content', '').strip()

    # Get meta keywords
    meta_keywords = soup.find('meta', attrs={'name': 'keywords'})
    if meta_keywords:
        metadata['keywords'] = meta_keywords.get('content', '').strip()

    # Fall back to the OG title if the main title is empty
    if not metadata['title']:
        og_title = soup.find('meta', attrs={'property': 'og:title'})
        if og_title:
            metadata['title'] = og_title.get('content', '').strip()

    return metadata
def generate_summary(bookmark):
    """
    Generate a comprehensive summary for a bookmark from the available content,
    using the LLM via the Groq Cloud API.
    """
    logger.info(f"Generating summary for bookmark: {bookmark.get('url')}")
    try:
        html_content = bookmark.get('html_content', '')

        # Get the HTML soup object from the bookmark
        soup = BeautifulSoup(html_content, 'html.parser')

        # Extract metadata and main content
        metadata = get_page_metadata(soup)
        main_content = extract_main_content(soup)

        # Prepare content for the prompt
        available_content = []
        if metadata['title']:
            available_content.append(f"Title: {metadata['title']}")
        if metadata['description']:
            available_content.append(f"Description: {metadata['description']}")
        if metadata['keywords']:
            available_content.append(f"Keywords: {metadata['keywords']}")
        if main_content:
            available_content.append(f"Main Content: {main_content}")
        content_text = ' '.join(available_content)
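        # Note: main_content is not truncated here, so very long pages can
        # exceed the model's 8,192-token context window (per the model name).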
        # Construct the prompt
        prompt = f"""
You are a helpful assistant that creates concise webpage summaries.
Analyze the following webpage content:
{content_text}
Provide a concise summary (2-3 sentences) focusing on:
- The main purpose or topic of the page.
- Key information or features.
- Target audience or use case (if apparent).
If the content is insufficient, use your prior knowledge about the website.
Be factual and objective.
"""

        # Call the LLM via the Groq Cloud API
        response = openai.ChatCompletion.create(
            model='llama3-8b-8192',
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=200,
            temperature=0.5,
        )
        summary = response['choices'][0]['message']['content'].strip()
        if not summary:
            raise ValueError("Empty summary received from the model.")
        logger.info("Successfully generated LLM summary")
        bookmark['summary'] = summary
        return bookmark
    except Exception as e:
        logger.error(f"Error generating summary: {e}", exc_info=True)
        # Fall back to the model's prior knowledge of the URL
        try:
            prompt = f"""
You are a knowledgeable assistant.
The user provided a URL: {bookmark.get('url')}
Provide a concise summary (2-3 sentences) about this website based on your knowledge.
Focus on:
- The main purpose or topic of the website.
- Key information or features.
- Target audience or use case (if apparent).
Be factual and objective.
"""
            response = openai.ChatCompletion.create(
                model='llama3-8b-8192',
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=200,
                temperature=0.5,
            )
            summary = response['choices'][0]['message']['content'].strip()
            if not summary:
                raise ValueError("Empty summary received from the model.")
            logger.info("Successfully generated LLM summary using prior knowledge")
            bookmark['summary'] = summary
        except Exception as inner_e:
            logger.error(f"Error generating summary using prior knowledge: {inner_e}", exc_info=True)
            bookmark['summary'] = 'No summary available.'
        return bookmark
def parse_bookmarks(file_content):
    """
    Parse bookmarks from an exported HTML bookmarks file.
    """
    logger.info("Parsing bookmarks")
    try:
        soup = BeautifulSoup(file_content, 'html.parser')
        extracted_bookmarks = []
        for link in soup.find_all('a'):
            url = link.get('href')
            title = link.text.strip()
            if url and title:
                extracted_bookmarks.append({'url': url, 'title': title})
        logger.info(f"Extracted {len(extracted_bookmarks)} bookmarks")
        return extracted_bookmarks
    except Exception as e:
        logger.error("Error parsing bookmarks: %s", e, exc_info=True)
        raise
async def fetch_url_info(session, bookmark):
    """
    Fetch information about a URL asynchronously.
    """
    url = bookmark['url']
    if url in fetch_cache:
        bookmark.update(fetch_cache[url])
        return bookmark
    try:
        logger.info(f"Fetching URL info for: {url}")
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/91.0.4472.124 Safari/537.36',
            'Accept-Language': 'en-US,en;q=0.9',
        }
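        # ssl=False disables certificate verification so bookmarks behind
        # self-signed or expired certificates still resolve; drop it if strict
        # TLS checking is preferred.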
        # Per-request timeout; overrides the session-level total set in
        # process_bookmarks_async
        async with session.get(url, timeout=aiohttp.ClientTimeout(total=20), headers=headers, ssl=False) as response:
            bookmark['etag'] = response.headers.get('ETag', 'N/A')
            bookmark['status_code'] = response.status
            content = await response.text()
            if response.status >= 500:
                # Server error; treat it as a dead link
                bookmark['dead_link'] = True
                bookmark['description'] = ''
                bookmark['html_content'] = ''
                logger.warning(f"Dead link detected: {url} with status {response.status}")
            else:
                bookmark['dead_link'] = False
                bookmark['html_content'] = content
                bookmark['description'] = ''
                logger.info(f"Fetched information for {url}")
    except Exception as e:
        bookmark['dead_link'] = True
        bookmark['etag'] = 'N/A'
        bookmark['status_code'] = 'N/A'
        bookmark['description'] = ''
        bookmark['html_content'] = ''
        logger.error(f"Error fetching URL info for {url}: {e}", exc_info=True)
    finally:
        fetch_cache[url] = {
            'etag': bookmark.get('etag'),
            'status_code': bookmark.get('status_code'),
            'dead_link': bookmark.get('dead_link'),
            'description': bookmark.get('description'),
            'html_content': bookmark.get('html_content', ''),
        }
    return bookmark
async def process_bookmarks_async(bookmarks_list):
    """
    Process all bookmarks asynchronously.
    """
    logger.info("Processing bookmarks asynchronously")
    try:
        connector = aiohttp.TCPConnector(limit=5)  # Limit concurrent connections
        timeout = aiohttp.ClientTimeout(total=30)  # Set session-level timeout
        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            tasks = []
            for bookmark in bookmarks_list:
                task = asyncio.ensure_future(fetch_url_info(session, bookmark))
                tasks.append(task)
            await asyncio.gather(*tasks)
        logger.info("Completed processing bookmarks asynchronously")
    except Exception as e:
        logger.error(f"Error in asynchronous processing of bookmarks: {e}", exc_info=True)
        raise
def assign_category(bookmark):
    """
    Assign a category to a bookmark based on its summary, using the LLM via the Groq Cloud API.
    """
    if bookmark.get('dead_link'):
        bookmark['category'] = 'Dead Link'
        logger.info(f"Assigned category 'Dead Link' to bookmark: {bookmark.get('url')}")
        return bookmark

    summary = bookmark.get('summary', '')
    if not summary:
        bookmark['category'] = 'Uncategorized'
        return bookmark

    # Prepare the prompt
    categories_str = ', '.join([f'"{cat}"' for cat in CATEGORIES if cat != 'Dead Link'])
    prompt = f"""
Based on the following summary, assign the most appropriate category from the list below.
Summary:
{summary}
Categories:
{categories_str}
Respond with only the category name.
"""
    try:
        response = openai.ChatCompletion.create(
            model='llama3-8b-8192',
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=10,
            temperature=0,
        )
        category = response['choices'][0]['message']['content'].strip().strip('"')

        # Validate the category
        if category in CATEGORIES:
            bookmark['category'] = category
            logger.info(f"Assigned category '{category}' to bookmark: {bookmark.get('url')}")
        else:
            bookmark['category'] = 'Uncategorized'
            logger.warning(f"Invalid category '{category}' returned by LLM for bookmark: {bookmark.get('url')}")
        return bookmark
    except Exception as e:
        logger.error(f"Error assigning category: {e}", exc_info=True)
        bookmark['category'] = 'Uncategorized'
        return bookmark
def vectorize_and_index(bookmarks_list):
    """
    Create vector embeddings for bookmarks and build a FAISS index with ID mapping.
    """
    logger.info("Vectorizing summaries and building FAISS index")
    try:
        summaries = [bookmark['summary'] for bookmark in bookmarks_list]
        embeddings = embedding_model.encode(summaries)
        dimension = embeddings.shape[1]
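        # IndexIDMap wraps the flat L2 index so vectors can be added (and later
        # removed) under explicit 64-bit IDs instead of positional indices.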
        index = faiss.IndexIDMap(faiss.IndexFlatL2(dimension))

        # Assign each bookmark's unique ID to its vector
        ids = np.array([bookmark['id'] for bookmark in bookmarks_list], dtype=np.int64)
        index.add_with_ids(np.array(embeddings).astype('float32'), ids)
        logger.info("FAISS index built successfully with IDs")
        return index
    except Exception as e:
        logger.error(f"Error in vectorizing and indexing: {e}", exc_info=True)
        raise
def display_bookmarks():
    """
    Generate the HTML display for bookmarks.
    """
    logger.info("Generating HTML display for bookmarks")
    cards = ''
    for i, bookmark in enumerate(bookmarks):
        index = i + 1
        status = "❌ Dead Link" if bookmark.get('dead_link') else "✅ Active"

        # Escape HTML content to prevent XSS attacks
        title = escape(bookmark['title'])
        url = escape(bookmark['url'])
        etag = escape(bookmark.get('etag', 'N/A'))
        summary = escape(bookmark.get('summary', ''))
        category = escape(bookmark.get('category', 'Uncategorized'))

        if bookmark.get('dead_link'):
            card_style = "border: 2px solid var(--error-color);"
            text_style = "color: var(--error-color);"
        else:
            card_style = "border: 2px solid var(--success-color);"
            text_style = "color: var(--text-color);"

        card_html = f'''
        <div class="card" style="{card_style} padding: 10px; margin: 10px; border-radius: 5px;">
            <div class="card-content">
                <h3 style="{text_style}">{index}. {title} {status}</h3>
                <p style="{text_style}"><strong>Category:</strong> {category}</p>
                <p style="{text_style}"><strong>URL:</strong> <a href="{url}" target="_blank" style="{text_style}">{url}</a></p>
                <p style="{text_style}"><strong>ETag:</strong> {etag}</p>
                <p style="{text_style}"><strong>Summary:</strong> {summary}</p>
            </div>
        </div>
        '''
        cards += card_html
    logger.info("HTML display generated")
    return cards
def process_uploaded_file(file):
    """
    Process the uploaded bookmarks file.
    """
    global bookmarks, faiss_index
    logger.info("Processing uploaded file")

    if file is None:
        logger.warning("No file uploaded")
        return "Please upload a bookmarks HTML file.", '', gr.update(choices=[]), display_bookmarks()

    try:
        file_content = file.decode('utf-8')
    except UnicodeDecodeError as e:
        logger.error(f"Error decoding the file: {e}", exc_info=True)
        return "Error decoding the file. Please ensure it's a valid HTML file.", '', gr.update(choices=[]), display_bookmarks()

    try:
        bookmarks = parse_bookmarks(file_content)
    except Exception as e:
        logger.error(f"Error parsing bookmarks: {e}", exc_info=True)
        return "Error parsing the bookmarks HTML file.", '', gr.update(choices=[]), display_bookmarks()

    if not bookmarks:
        logger.warning("No bookmarks found in the uploaded file")
        return "No bookmarks found in the uploaded file.", '', gr.update(choices=[]), display_bookmarks()

    # Assign unique IDs to bookmarks
    for idx, bookmark in enumerate(bookmarks):
        bookmark['id'] = idx

    # Asynchronously fetch bookmark info
    try:
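        # asyncio.run() starts a fresh event loop; it raises RuntimeError if a
        # loop is already running in this thread (e.g. inside a notebook).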
        asyncio.run(process_bookmarks_async(bookmarks))
    except Exception as e:
        logger.error(f"Error processing bookmarks asynchronously: {e}", exc_info=True)
        return "Error processing bookmarks.", '', gr.update(choices=[]), display_bookmarks()

    # Generate summaries and assign categories
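    # Note: this makes LLM calls for each bookmark sequentially (one for the
    # summary, one for the category), so large files can take a while.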
    for bookmark in bookmarks:
        generate_summary(bookmark)
        assign_category(bookmark)

    try:
        faiss_index = vectorize_and_index(bookmarks)
    except Exception as e:
        logger.error(f"Error building FAISS index: {e}", exc_info=True)
        return "Error building search index.", '', gr.update(choices=[]), display_bookmarks()

    message = f"✅ Successfully processed {len(bookmarks)} bookmarks."
    logger.info(message)

    # Generate displays and updates
    bookmark_html = display_bookmarks()
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, bookmark_html, gr.update(choices=choices), bookmark_html
def delete_selected_bookmarks(selected_indices):
    """
    Delete selected bookmarks and remove their vectors from the FAISS index.
    """
    global bookmarks, faiss_index
    if not selected_indices:
        return "⚠️ No bookmarks selected.", gr.update(choices=[]), display_bookmarks()

    ids_to_delete = []
    indices_to_delete = []
    for s in selected_indices:
        idx = int(s.split('.')[0]) - 1
        if 0 <= idx < len(bookmarks):
            bookmark_id = bookmarks[idx]['id']
            ids_to_delete.append(bookmark_id)
            indices_to_delete.append(idx)
            logger.info(f"Deleting bookmark at index {idx + 1}")

    # Remove vectors from the FAISS index
    if faiss_index is not None and ids_to_delete:
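        # The IndexIDMap wrapper lets vectors be removed by the stable bookmark
        # ID rather than by their internal position in the flat index.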
        faiss_index.remove_ids(np.array(ids_to_delete, dtype=np.int64))

    # Remove bookmarks from the list (reverse order to avoid index shifting)
    for idx in sorted(indices_to_delete, reverse=True):
        bookmarks.pop(idx)

    message = "🗑️ Selected bookmarks deleted successfully."
    logger.info(message)
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, gr.update(choices=choices), display_bookmarks()
def edit_selected_bookmarks_category(selected_indices, new_category):
    """
    Edit the category of the selected bookmarks.
    """
    if not selected_indices:
        return "⚠️ No bookmarks selected.", gr.update(choices=[]), display_bookmarks()
    if not new_category:
        return "⚠️ No new category selected.", gr.update(choices=[]), display_bookmarks()

    indices = [int(s.split('.')[0]) - 1 for s in selected_indices]
    for idx in indices:
        if 0 <= idx < len(bookmarks):
            bookmarks[idx]['category'] = new_category
            logger.info(f"Updated category for bookmark {idx + 1} to {new_category}")

    message = "✏️ Category updated for selected bookmarks."
    logger.info(message)

    # Update choices and display
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, gr.update(choices=choices), display_bookmarks()
def export_bookmarks():
    """
    Export bookmarks to an HTML file in the Netscape bookmark format.
    """
    if not bookmarks:
        logger.warning("No bookmarks to export")
        return "⚠️ No bookmarks to export."

    try:
        logger.info("Exporting bookmarks to HTML")
        soup = BeautifulSoup("<!DOCTYPE NETSCAPE-Bookmark-file-1><Title>Bookmarks</Title><H1>Bookmarks</H1>", 'html.parser')
        dl = soup.new_tag('DL')
        for bookmark in bookmarks:
            dt = soup.new_tag('DT')
            a = soup.new_tag('A', href=bookmark['url'])
            a.string = bookmark['title']
            dt.append(a)
            dl.append(dt)
        soup.append(dl)
        html_content = str(soup)
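        # Embed the exported HTML in a base64 data: URI so the browser can
        # download it directly, without the app writing a temporary file.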
        b64 = base64.b64encode(html_content.encode()).decode()
        href = f'data:text/html;base64,{b64}'
        logger.info("Bookmarks exported successfully")
        return f'<a href="{href}" download="bookmarks.html">💾 Download Exported Bookmarks</a>'
    except Exception as e:
        logger.error(f"Error exporting bookmarks: {e}", exc_info=True)
        return "⚠️ Error exporting bookmarks."
def chatbot_response(user_query):
    """
    Generate a chatbot response using the FAISS index and embeddings.
    """
    if not bookmarks or faiss_index is None:
        logger.warning("No bookmarks available for chatbot")
        return "⚠️ No bookmarks available. Please upload and process your bookmarks first."

    logger.info(f"Chatbot received query: {user_query}")
    try:
        # Encode the user query
        query_vector = embedding_model.encode([user_query]).astype('float32')

        # Search the FAISS index
        k = 5  # Number of results to return
        distances, ids = faiss_index.search(query_vector, k)
        ids = ids.flatten()
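        # FAISS pads the result list with ID -1 when fewer than k vectors are
        # indexed; the dictionary lookup below silently drops those placeholders.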
        # Retrieve the matching bookmarks
        id_to_bookmark = {bookmark['id']: bookmark for bookmark in bookmarks}
        matching_bookmarks = [id_to_bookmark[bookmark_id] for bookmark_id in ids if bookmark_id in id_to_bookmark]
        if not matching_bookmarks:
            return "No relevant bookmarks found for your query."

        # Format the retrieved bookmarks for the prompt
        bookmarks_info = "\n".join([
            f"Title: {bookmark['title']}\nURL: {bookmark['url']}\nSummary: {bookmark['summary']}"
            for bookmark in matching_bookmarks
        ])

        # Use the LLM via the Groq Cloud API to generate a response
        prompt = f"""
A user asked: "{user_query}"
Based on the bookmarks below, provide a helpful answer to the user's query, referencing the relevant bookmarks.
Bookmarks:
{bookmarks_info}
Provide a concise and helpful response.
"""
        response = openai.ChatCompletion.create(
            model='llama3-8b-8192',
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=500,
            temperature=0.7,
        )
        answer = response['choices'][0]['message']['content'].strip()
        logger.info("Chatbot response generated using Groq Cloud API")
        return answer
    except Exception as e:
        error_message = f"⚠️ Error processing your query: {str(e)}"
        logger.error(error_message, exc_info=True)
        return error_message
def build_app():
    """
    Build and launch the Gradio app.
    """
    try:
        logger.info("Building Gradio app")
        with gr.Blocks(css="app.css") as demo:
            # General overview
            gr.Markdown("""
# 🔖 SmartMarks - AI Browser Bookmarks Manager

Welcome to **SmartMarks**, your intelligent assistant for managing browser bookmarks. SmartMarks leverages AI to help you organize, search, and interact with your bookmarks seamlessly.

---

## 🚀 **How to Use SmartMarks**

SmartMarks is divided into three main sections:

1. **📥 Upload and Process Bookmarks:** Import your existing bookmarks and let SmartMarks analyze and categorize them for you.
2. **💬 Chat with Bookmarks:** Interact with your bookmarks using natural language queries to find relevant links effortlessly.
3. **🛠️ Manage Bookmarks:** View, edit, delete, and export your bookmarks with ease.
""")
            # Upload and Process Bookmarks tab
            with gr.Tab("Upload and Process Bookmarks"):
                gr.Markdown("""
## 📥 **Upload and Process Bookmarks**

### 📝 **Steps:**
1. Click on the "Upload Bookmarks HTML File" button
2. Select your bookmarks file
3. Click "Process Bookmarks" to analyze and organize your bookmarks
""")
                upload = gr.File(label="📁 Upload Bookmarks HTML File", type='binary')
                process_button = gr.Button("⚙️ Process Bookmarks")
                output_text = gr.Textbox(label="✅ Output", interactive=False)
                bookmark_display = gr.HTML(label="📋 Processed Bookmarks")

            # Chat with Bookmarks tab
            with gr.Tab("Chat with Bookmarks"):
                gr.Markdown("""
## 💬 **Chat with Bookmarks**

Ask questions about your bookmarks and get relevant results.
""")
                user_input = gr.Textbox(
                    label="✍️ Ask about your bookmarks",
                    placeholder="e.g., Do I have any bookmarks about AI?"
                )
                chat_button = gr.Button("📨 Send")
                chat_output = gr.Textbox(label="💬 Response", interactive=False)
            # Manage Bookmarks tab
            with gr.Tab("Manage Bookmarks"):
                gr.Markdown("""
## 🛠️ **Manage Bookmarks**

Select bookmarks to delete or edit their categories.
""")
                manage_output = gr.Textbox(label="📋 Status", interactive=False)
                bookmark_selector = gr.CheckboxGroup(
                    label="✅ Select Bookmarks",
                    choices=[]
                )
                new_category = gr.Dropdown(
                    label="📂 New Category",
                    choices=CATEGORIES,
                    value="Uncategorized"
                )
                bookmark_display_manage = gr.HTML(label="📚 Bookmarks")
                with gr.Row():
                    delete_button = gr.Button("🗑️ Delete Selected")
                    edit_category_button = gr.Button("✏️ Edit Category")
                    export_button = gr.Button("💾 Export")
                download_link = gr.HTML(label="📥 Download")
            # Set up event handlers
            process_button.click(
                process_uploaded_file,
                inputs=upload,
                outputs=[output_text, bookmark_display, bookmark_selector, bookmark_display_manage]
            )
            chat_button.click(
                chatbot_response,
                inputs=user_input,
                outputs=chat_output
            )
            delete_button.click(
                delete_selected_bookmarks,
                inputs=bookmark_selector,
                outputs=[manage_output, bookmark_selector, bookmark_display_manage]
            )
            edit_category_button.click(
                edit_selected_bookmarks_category,
                inputs=[bookmark_selector, new_category],
                outputs=[manage_output, bookmark_selector, bookmark_display_manage]
            )
            export_button.click(
                export_bookmarks,
                outputs=download_link
            )

        logger.info("Launching Gradio app")
        demo.launch(debug=True)
    except Exception as e:
        logger.error(f"Error building the app: {e}", exc_info=True)
        print(f"Error building the app: {e}")


if __name__ == "__main__":
    build_app()