# app.py

import gradio as gr
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import asyncio
import aiohttp
import re
import base64
import logging
import os
import sys

# Import OpenAI library
import openai

# Set up logging to output to the console
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Create a console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)

# Create a formatter and set it for the handler
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
console_handler.setFormatter(formatter)

# Add the handler to the logger
logger.addHandler(console_handler)
# Initialize models and variables
logger.info("Initializing models and variables")
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
faiss_index = None
bookmarks = []
fetch_cache = {}
# Define the categories
CATEGORIES = [
    "Social Media",
    "News and Media",
    "Education and Learning",
    "Entertainment",
    "Shopping and E-commerce",
    "Finance and Banking",
    "Technology",
    "Health and Fitness",
    "Travel and Tourism",
    "Food and Recipes",
    "Sports",
    "Arts and Culture",
    "Government and Politics",
    "Business and Economy",
    "Science and Research",
    "Personal Blogs and Journals",
    "Job Search and Careers",
    "Music and Audio",
    "Videos and Movies",
    "Reference and Knowledge Bases",
    "Dead Link",
    "Uncategorized",
]
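# Note: 'Dead Link' and 'Uncategorized' are assigned by the pipeline itself
# (see fetch_url_info and assign_category below); the remaining entries should
# stay in sync with the keys of category_keywords in assign_category.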
# Set up Groq Cloud API key and base URL
GROQ_API_KEY = os.getenv('GROQ_API_KEY')

if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY environment variable not set.")

# Set OpenAI API key and base URL to use the Groq Cloud API
openai.api_key = GROQ_API_KEY
openai.api_base = "https://api.groq.com/openai/v1"
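# Note: the module-level openai.api_key / openai.api_base configuration used
# here (and openai.ChatCompletion.create below) is the pre-1.0 openai SDK
# interface. With openai>=1.0 the equivalent would be a client object, e.g.
#   client = openai.OpenAI(api_key=GROQ_API_KEY, base_url="https://api.groq.com/openai/v1")
#   client.chat.completions.create(...)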
def extract_main_content(soup):
    """
    Extract the main content from a webpage while filtering out boilerplate content.
    """
    if not soup:
        return ""

    # Remove script, style, and boilerplate layout elements
    for element in soup(['script', 'style', 'header', 'footer', 'nav', 'aside']):
        element.decompose()

    # First try to find content in main content areas.
    # Note: find_all() matches tag names only, so class-based containers such
    # as div.content are located with a CSS selector instead.
    main_content_tags = soup.find_all(['article', 'main']) + soup.select('div.content, div.post, div.entry-content')
    if main_content_tags:
        content = ' '.join(tag.get_text(strip=True, separator=' ') for tag in main_content_tags)
    else:
        # Try to find content in <p> tags
        p_tags = soup.find_all('p')
        if p_tags:
            content = ' '.join(p.get_text(strip=True, separator=' ') for p in p_tags)
        else:
            # Fall back to the body content
            content = soup.body.get_text(strip=True, separator=' ') if soup.body else soup.get_text(strip=True, separator=' ')

    # Collapse all runs of whitespace (including newlines and tabs) to single spaces
    content = ' '.join(content.split())

    # Limit content length to avoid token limits (adjust as needed)
    return content[:5000]
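# Example (illustrative): for soup built from
#   '<html><body><nav>Menu</nav><main><p>Hello world</p></main></body></html>'
# extract_main_content(soup) returns 'Hello world' -- the <nav> boilerplate is
# decomposed and only the <main> text survives.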
def get_page_metadata(soup):
    """
    Extract metadata from the webpage including title, description, and keywords.
    """
    metadata = {
        'title': '',
        'description': '',
        'keywords': ''
    }
    if not soup:
        return metadata

    # Get title
    title_tag = soup.find('title')
    if title_tag and title_tag.string:
        metadata['title'] = title_tag.string.strip()

    # Get meta description (try multiple variants)
    meta_desc = (
        soup.find('meta', attrs={'name': 'description'}) or
        soup.find('meta', attrs={'property': 'og:description'}) or
        soup.find('meta', attrs={'name': 'twitter:description'})
    )
    if meta_desc:
        metadata['description'] = meta_desc.get('content', '').strip()

    # Get meta keywords
    meta_keywords = soup.find('meta', attrs={'name': 'keywords'})
    if meta_keywords:
        metadata['keywords'] = meta_keywords.get('content', '').strip()

    # Fall back to the Open Graph title if the main title is empty
    if not metadata['title']:
        og_title = soup.find('meta', attrs={'property': 'og:title'})
        if og_title:
            metadata['title'] = og_title.get('content', '').strip()

    return metadata
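# Example (illustrative): for soup built from
#   '<head><title>Example</title><meta name="description" content="A demo page"></head>'
# get_page_metadata(soup) returns
#   {'title': 'Example', 'description': 'A demo page', 'keywords': ''}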
def generate_summary(bookmark):
    """
    Generate a comprehensive summary for a bookmark using available content and LLM.
    """
    logger.info(f"Generating summary for bookmark: {bookmark.get('url')}")
    try:
        # Parse the HTML content stored on the bookmark (empty string if none)
        soup = BeautifulSoup(bookmark.get('html_content', ''), 'html.parser')

        # Step 1: Extract all available information
        metadata = get_page_metadata(soup)
        main_content = extract_main_content(soup)

        # Step 2: Generate summary using LLM with all available content
        try:
            # Prepare comprehensive context for LLM
            available_content = []
            if metadata['title']:
                available_content.append(f"Title: {metadata['title']}")
            if metadata['description']:
                available_content.append(f"Description: {metadata['description']}")
            if metadata['keywords']:
                available_content.append(f"Keywords: {metadata['keywords']}")
            if main_content:
                available_content.append(f"Main Content: {main_content}")

            if not available_content:
                logger.warning("No content available for summary generation")
                bookmark['summary'] = bookmark.get('title', 'No summary available.')
                return bookmark

            prompt = f"""
Analyze and summarize this webpage based on the following information:

{' | '.join(available_content)}

Please provide a concise summary (2-3 sentences) focusing on:
1. The main purpose or topic of the page
2. Key information or features
3. Target audience or use case (if apparent)

Be factual and objective.
"""
            response = openai.ChatCompletion.create(
                model='llama3-8b-8192',
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that creates concise webpage summaries."},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=150,
                temperature=0.5,
            )
            summary = response['choices'][0]['message']['content'].strip()
            logger.info("Successfully generated LLM summary")
            bookmark['summary'] = summary
            return bookmark
        except Exception as e:
            logger.error(f"Error generating LLM summary: {e}")
            # Fallback mechanisms in order of preference
            if metadata['description']:
                logger.info("Falling back to meta description")
                bookmark['summary'] = metadata['description']
            elif main_content:
                logger.info("Falling back to truncated main content")
                bookmark['summary'] = ' '.join(main_content.split()[:50]) + '...'
            elif metadata['title']:
                logger.info("Falling back to title")
                bookmark['summary'] = metadata['title']
            else:
                bookmark['summary'] = bookmark.get('title', 'No summary available.')
            return bookmark
    except Exception as e:
        logger.error(f"Error in generate_summary: {e}")
        bookmark['summary'] = bookmark.get('title', 'No summary available.')
        return bookmark
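# Shape of a bookmark dict as it moves through the pipeline (field names taken
# from the functions above and below):
#   {'url': ..., 'title': ...}                            after parse_bookmarks()
#   + 'etag', 'status_code', 'dead_link',
#     'description', 'html_content'                       after fetch_url_info()
#   + 'summary'                                           after generate_summary()
#   + 'category'                                          after assign_category()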
def parse_bookmarks(file_content):
    """
    Parse bookmarks from an HTML bookmarks export file.
    """
    logger.info("Parsing bookmarks")
    try:
        soup = BeautifulSoup(file_content, 'html.parser')
        extracted_bookmarks = []
        for link in soup.find_all('a'):
            url = link.get('href')
            title = link.text.strip()
            if url and title:
                extracted_bookmarks.append({'url': url, 'title': title})
        logger.info(f"Extracted {len(extracted_bookmarks)} bookmarks")
        return extracted_bookmarks
    except Exception as e:
        logger.error("Error parsing bookmarks: %s", e)
        raise
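# Browsers export bookmarks in the Netscape bookmark file format, which is
# plain HTML, e.g.:
#   <!DOCTYPE NETSCAPE-Bookmark-file-1>
#   <DL><p>
#       <DT><A HREF="https://example.com" ADD_DATE="1700000000">Example</A>
#   </DL><p>
# parse_bookmarks() only needs the <A> tags, so it works on any HTML file
# containing links.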
async def fetch_url_info(session, bookmark):
    """
    Fetch information about a URL asynchronously.
    """
    url = bookmark['url']
    if url in fetch_cache:
        bookmark.update(fetch_cache[url])
        return bookmark
    try:
        logger.info(f"Fetching URL info for: {url}")
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        async with session.get(url, timeout=10, headers=headers) as response:
            bookmark['etag'] = response.headers.get('ETag', 'N/A')
            bookmark['status_code'] = response.status
            if response.status >= 400:
                bookmark['dead_link'] = True
                bookmark['description'] = ''
                bookmark['html_content'] = ''
                logger.warning(f"Dead link detected: {url} with status {response.status}")
            else:
                bookmark['dead_link'] = False
                content = await response.text()
                bookmark['html_content'] = content  # Store full HTML for summary generation
                bookmark['description'] = ''  # Will be set by generate_summary function
                logger.info(f"Fetched information for {url}")
    except Exception as e:
        bookmark['dead_link'] = True
        bookmark['etag'] = 'N/A'
        bookmark['status_code'] = 'N/A'
        bookmark['description'] = ''
        bookmark['html_content'] = ''
        logger.error(f"Error fetching URL info for {url}: {e}")
    finally:
        fetch_cache[url] = {
            'etag': bookmark.get('etag'),
            'status_code': bookmark.get('status_code'),
            'dead_link': bookmark.get('dead_link'),
            'description': bookmark.get('description'),
            'html_content': bookmark.get('html_content', '')
        }
    return bookmark
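# Note: fetch_cache deduplicates work -- if the same URL appears in several
# bookmarks, only the first triggers a network request and the rest reuse the
# cached result. The cache is module-level, so it persists across uploads
# until the process restarts.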
async def process_bookmarks_async(bookmarks_list):
    """
    Process all bookmarks asynchronously.
    """
    logger.info("Processing bookmarks asynchronously")
    try:
        connector = aiohttp.TCPConnector(limit=5)  # Limit concurrent connections
        timeout = aiohttp.ClientTimeout(total=30)  # Set timeout
        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            tasks = []
            for bookmark in bookmarks_list:
                task = asyncio.ensure_future(fetch_url_info(session, bookmark))
                tasks.append(task)
            await asyncio.gather(*tasks)
        logger.info("Completed processing bookmarks asynchronously")
    except Exception as e:
        logger.error(f"Error in asynchronous processing of bookmarks: {e}")
        raise
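# Note: process_uploaded_file() drives this coroutine with asyncio.run(),
# which works here because Gradio runs sync event handlers in worker threads
# that have no event loop of their own. If this were called from code already
# running inside an event loop, asyncio.run() would raise RuntimeError.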
def assign_category(bookmark):
    """
    Assign a category to a bookmark based on its content.
    """
    if bookmark.get('dead_link'):
        bookmark['category'] = 'Dead Link'
        logger.info(f"Assigned category 'Dead Link' to bookmark: {bookmark.get('url')}")
        return bookmark

    summary = bookmark.get('summary', '').lower()
    assigned_category = 'Uncategorized'

    # Keywords associated with each category
    category_keywords = {
        "Social Media": ["social media", "networking", "friends", "connect", "posts", "profile"],
        "News and Media": ["news", "journalism", "media", "headlines", "breaking news"],
        "Education and Learning": ["education", "learning", "courses", "tutorial", "university", "academy", "study"],
        "Entertainment": ["entertainment", "movies", "tv shows", "games", "comics", "fun"],
        "Shopping and E-commerce": ["shopping", "e-commerce", "buy", "sell", "marketplace", "deals", "store"],
        "Finance and Banking": ["finance", "banking", "investment", "money", "economy", "stock", "trading"],
        "Technology": ["technology", "tech", "gadgets", "software", "computers", "innovation"],
        "Health and Fitness": ["health", "fitness", "medical", "wellness", "exercise", "diet"],
        "Travel and Tourism": ["travel", "tourism", "destinations", "hotels", "flights", "vacation"],
        "Food and Recipes": ["food", "recipes", "cooking", "cuisine", "restaurant", "dining"],
        "Sports": ["sports", "scores", "teams", "athletics", "matches", "leagues"],
        "Arts and Culture": ["arts", "culture", "museum", "gallery", "exhibition", "artistic"],
        "Government and Politics": ["government", "politics", "policy", "election", "public service"],
        "Business and Economy": ["business", "corporate", "industry", "economy", "markets"],
        "Science and Research": ["science", "research", "experiment", "laboratory", "study", "scientific"],
        "Personal Blogs and Journals": ["blog", "journal", "personal", "diary", "thoughts", "opinions"],
        "Job Search and Careers": ["jobs", "careers", "recruitment", "resume", "employment", "hiring"],
        "Music and Audio": ["music", "audio", "songs", "albums", "artists", "bands"],
        "Videos and Movies": ["video", "movies", "film", "clips", "trailers", "cinema"],
        "Reference and Knowledge Bases": ["reference", "encyclopedia", "dictionary", "wiki", "knowledge", "information"],
    }

    for category, keywords in category_keywords.items():
        for keyword in keywords:
            if re.search(r'\b' + re.escape(keyword) + r'\b', summary):
                assigned_category = category
                logger.info(f"Assigned category '{assigned_category}' to bookmark: {bookmark.get('url')}")
                break
        if assigned_category != 'Uncategorized':
            break

    bookmark['category'] = assigned_category
    if assigned_category == 'Uncategorized':
        logger.info(f"No matching category found for bookmark: {bookmark.get('url')}")
    return bookmark
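# Example (illustrative): a summary like "Find the best deals in our online
# store" matches the "Shopping and E-commerce" keywords ("deals", "store"),
# so the bookmark gets that category. Matching is first-hit in dict insertion
# order, so earlier categories win when keywords overlap.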
def vectorize_and_index(bookmarks_list):
    """
    Create vector embeddings for bookmarks and build a FAISS index.
    """
    logger.info("Vectorizing summaries and building FAISS index")
    try:
        summaries = [bookmark['summary'] for bookmark in bookmarks_list]
        embeddings = embedding_model.encode(summaries)
        dimension = embeddings.shape[1]
        faiss_idx = faiss.IndexFlatL2(dimension)
        faiss_idx.add(np.array(embeddings))
        logger.info("FAISS index built successfully")
        return faiss_idx, embeddings
    except Exception as e:
        logger.error(f"Error in vectorizing and indexing: {e}")
        raise
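# The FAISS index built above is stored in the global faiss_index but never
# queried elsewhere in this file (chatbot_response() sends raw summaries to
# the LLM instead). A minimal sketch of how the index could back a semantic
# search -- semantic_search is a hypothetical helper, not wired into the UI:
def semantic_search(query, top_k=5):
    """Return the top_k bookmarks whose summaries are closest to the query."""
    if faiss_index is None or not bookmarks:
        return []
    # encode() on a list returns a 2D array, which is what FAISS expects
    query_embedding = embedding_model.encode([query])
    distances, indices = faiss_index.search(np.array(query_embedding), top_k)
    # FAISS pads with -1 when fewer than top_k vectors exist; filter those out
    return [bookmarks[i] for i in indices[0] if 0 <= i < len(bookmarks)]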
def display_bookmarks():
    """
    Generate HTML display for bookmarks.
    """
    logger.info("Generating HTML display for bookmarks")
    cards = ''
    for i, bookmark in enumerate(bookmarks):
        index = i + 1
        status = "❌ Dead Link" if bookmark.get('dead_link') else "✅ Active"
        title = bookmark['title']
        url = bookmark['url']
        etag = bookmark.get('etag', 'N/A')
        summary = bookmark.get('summary', '')
        category = bookmark.get('category', 'Uncategorized')
        if bookmark.get('dead_link'):
            card_style = "border: 2px solid var(--error-color);"
            text_style = "color: var(--error-color);"
        else:
            card_style = "border: 2px solid var(--success-color);"
            text_style = "color: var(--text-color);"
        card_html = f'''
        <div class="card" style="{card_style} padding: 10px; margin: 10px; border-radius: 5px;">
            <div class="card-content">
                <h3 style="{text_style}">{index}. {title} {status}</h3>
                <p style="{text_style}"><strong>Category:</strong> {category}</p>
                <p style="{text_style}"><strong>URL:</strong> <a href="{url}" target="_blank" style="{text_style}">{url}</a></p>
                <p style="{text_style}"><strong>ETag:</strong> {etag}</p>
                <p style="{text_style}"><strong>Summary:</strong> {summary}</p>
            </div>
        </div>
        '''
        cards += card_html
    logger.info("HTML display generated")
    return cards
def process_uploaded_file(file):
    """
    Process the uploaded bookmarks file.
    """
    global bookmarks, faiss_index
    logger.info("Processing uploaded file")
    if file is None:
        logger.warning("No file uploaded")
        return "Please upload a bookmarks HTML file.", '', gr.update(choices=[]), display_bookmarks()
    try:
        file_content = file.decode('utf-8')
    except UnicodeDecodeError as e:
        logger.error(f"Error decoding the file: {e}")
        return "Error decoding the file. Please ensure it's a valid HTML file.", '', gr.update(choices=[]), display_bookmarks()
    try:
        bookmarks = parse_bookmarks(file_content)
    except Exception as e:
        logger.error(f"Error parsing bookmarks: {e}")
        return "Error parsing the bookmarks HTML file.", '', gr.update(choices=[]), display_bookmarks()
    if not bookmarks:
        logger.warning("No bookmarks found in the uploaded file")
        return "No bookmarks found in the uploaded file.", '', gr.update(choices=[]), display_bookmarks()

    # Asynchronously fetch bookmark info
    try:
        asyncio.run(process_bookmarks_async(bookmarks))
    except Exception as e:
        logger.error(f"Error processing bookmarks asynchronously: {e}")
        return "Error processing bookmarks.", '', gr.update(choices=[]), display_bookmarks()

    # Generate summaries and assign categories
    for bookmark in bookmarks:
        generate_summary(bookmark)
        assign_category(bookmark)

    try:
        faiss_index, embeddings = vectorize_and_index(bookmarks)
    except Exception as e:
        logger.error(f"Error building FAISS index: {e}")
        return "Error building search index.", '', gr.update(choices=[]), display_bookmarks()

    message = f"✅ Successfully processed {len(bookmarks)} bookmarks."
    logger.info(message)

    # Generate displays and updates
    bookmark_html = display_bookmarks()
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, bookmark_html, gr.update(choices=choices), bookmark_html
def delete_selected_bookmarks(selected_indices):
    """
    Delete selected bookmarks.
    """
    global bookmarks, faiss_index
    if not selected_indices:
        return "⚠️ No bookmarks selected.", gr.update(choices=[]), display_bookmarks()
    # Choice labels look like "3. Title (Category: ...)"; recover the 0-based index
    indices = [int(s.split('.')[0]) - 1 for s in selected_indices]
    # Delete from the end so earlier indices stay valid
    indices = sorted(indices, reverse=True)
    for idx in indices:
        if 0 <= idx < len(bookmarks):
            logger.info(f"Deleting bookmark at index {idx + 1}")
            bookmarks.pop(idx)
    if bookmarks:
        faiss_index, embeddings = vectorize_and_index(bookmarks)
    else:
        faiss_index = None
    message = "🗑️ Selected bookmarks deleted successfully."
    logger.info(message)

    # Update choices and display
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, gr.update(choices=choices), display_bookmarks()
def edit_selected_bookmarks_category(selected_indices, new_category):
    """
    Edit the category of selected bookmarks.
    """
    if not selected_indices:
        return "⚠️ No bookmarks selected.", gr.update(choices=[]), display_bookmarks()
    if not new_category:
        return "⚠️ No new category selected.", gr.update(choices=[]), display_bookmarks()
    indices = [int(s.split('.')[0]) - 1 for s in selected_indices]
    for idx in indices:
        if 0 <= idx < len(bookmarks):
            bookmarks[idx]['category'] = new_category
            logger.info(f"Updated category for bookmark {idx + 1} to {new_category}")
    message = "✏️ Category updated for selected bookmarks."
    logger.info(message)

    # Update choices and display
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, gr.update(choices=choices), display_bookmarks()
def export_bookmarks():
    """
    Export bookmarks to an HTML file.
    """
    if not bookmarks:
        logger.warning("No bookmarks to export")
        return "⚠️ No bookmarks to export."
    try:
        logger.info("Exporting bookmarks to HTML")
        soup = BeautifulSoup("<!DOCTYPE NETSCAPE-Bookmark-file-1><Title>Bookmarks</Title><H1>Bookmarks</H1>", 'html.parser')
        dl = soup.new_tag('DL')
        for bookmark in bookmarks:
            dt = soup.new_tag('DT')
            a = soup.new_tag('A', href=bookmark['url'])
            a.string = bookmark['title']
            dt.append(a)
            dl.append(dt)
        soup.append(dl)
        html_content = str(soup)
        # Embed the file in a data: URI so it can be downloaded straight from an HTML link
        b64 = base64.b64encode(html_content.encode()).decode()
        href = f'data:text/html;base64,{b64}'
        logger.info("Bookmarks exported successfully")
        return f'<a href="{href}" download="bookmarks.html">💾 Download Exported Bookmarks</a>'
    except Exception as e:
        logger.error(f"Error exporting bookmarks: {e}")
        return "⚠️ Error exporting bookmarks."
def chatbot_response(user_query):
    """
    Generate a chatbot response using the Groq Cloud API.
    """
    if not GROQ_API_KEY:
        logger.warning("GROQ_API_KEY not set.")
        return "⚠️ API key not set. Please set the GROQ_API_KEY environment variable."
    if not bookmarks:
        logger.warning("No bookmarks available for chatbot")
        return "⚠️ No bookmarks available. Please upload and process your bookmarks first."
    logger.info(f"Chatbot received query: {user_query}")
    try:
        # Cap the number of bookmarks sent to the LLM to stay within its context window
        max_bookmarks = 50
        bookmark_data = ""
        for idx, bookmark in enumerate(bookmarks[:max_bookmarks]):
            bookmark_data += f"{idx+1}. Title: {bookmark['title']}\nURL: {bookmark['url']}\nSummary: {bookmark['summary']}\n\n"
        prompt = f"""
You are an assistant that helps users find relevant bookmarks from their collection based on their queries.

User Query:
{user_query}

Bookmarks:
{bookmark_data}

Please identify the most relevant bookmarks that match the user's query. Provide a concise list including the index, title, URL, and a brief summary.
"""
        response = openai.ChatCompletion.create(
            model='llama3-8b-8192',
            messages=[
                {"role": "system", "content": "You help users find relevant bookmarks based on their queries."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=500,
            temperature=0.7,
        )
        answer = response['choices'][0]['message']['content'].strip()
        logger.info("Chatbot response generated using Groq Cloud API")
        return answer
    except Exception as e:
        error_message = f"⚠️ Error processing your query: {str(e)}"
        logger.error(error_message)
        return error_message
def build_app():
    """
    Build and launch the Gradio app.
    """
    try:
        logger.info("Building Gradio app")
        with gr.Blocks(css="app.css") as demo:
            # General overview
            gr.Markdown("""
# 🔖 SmartMarks - AI Browser Bookmarks Manager

Welcome to **SmartMarks**, your intelligent assistant for managing browser bookmarks. SmartMarks leverages AI to help you organize, search, and interact with your bookmarks seamlessly.

---

## 📖 **How to Use SmartMarks**

SmartMarks is divided into three main sections:

1. **📂 Upload and Process Bookmarks:** Import your existing bookmarks and let SmartMarks analyze and categorize them for you.
2. **💬 Chat with Bookmarks:** Interact with your bookmarks using natural language queries to find relevant links effortlessly.
3. **🛠️ Manage Bookmarks:** View, edit, delete, and export your bookmarks with ease.
""")

            # Upload and Process Bookmarks tab
            with gr.Tab("Upload and Process Bookmarks"):
                gr.Markdown("""
## 📂 **Upload and Process Bookmarks**

### 📝 **Steps:**
1. Click on the "Upload Bookmarks HTML File" button
2. Select your bookmarks file
3. Click "Process Bookmarks" to analyze and organize your bookmarks
""")
                upload = gr.File(label="📁 Upload Bookmarks HTML File", type='binary')
                process_button = gr.Button("⚙️ Process Bookmarks")
                output_text = gr.Textbox(label="✅ Output", interactive=False)
                bookmark_display = gr.HTML(label="📋 Processed Bookmarks")

            # Chat with Bookmarks tab
            with gr.Tab("Chat with Bookmarks"):
                gr.Markdown("""
## 💬 **Chat with Bookmarks**

Ask questions about your bookmarks and get relevant results.
""")
                user_input = gr.Textbox(
                    label="✍️ Ask about your bookmarks",
                    placeholder="e.g., Do I have any bookmarks about AI?"
                )
                chat_button = gr.Button("📨 Send")
                chat_output = gr.Textbox(label="💬 Response", interactive=False)

            # Manage Bookmarks tab
            with gr.Tab("Manage Bookmarks"):
                gr.Markdown("""
## 🛠️ **Manage Bookmarks**

Select bookmarks to delete or edit their categories.
""")
                manage_output = gr.Textbox(label="📊 Status", interactive=False)
                bookmark_selector = gr.CheckboxGroup(
                    label="✅ Select Bookmarks",
                    choices=[]
                )
                new_category = gr.Dropdown(
                    label="📂 New Category",
                    choices=CATEGORIES,
                    value="Uncategorized"
                )
                bookmark_display_manage = gr.HTML(label="📚 Bookmarks")
                with gr.Row():
                    delete_button = gr.Button("🗑️ Delete Selected")
                    edit_category_button = gr.Button("✏️ Edit Category")
                    export_button = gr.Button("💾 Export")
                download_link = gr.HTML(label="📥 Download")

            # Set up event handlers
            process_button.click(
                process_uploaded_file,
                inputs=upload,
                outputs=[output_text, bookmark_display, bookmark_selector, bookmark_display_manage]
            )
            chat_button.click(
                chatbot_response,
                inputs=user_input,
                outputs=chat_output
            )
            delete_button.click(
                delete_selected_bookmarks,
                inputs=bookmark_selector,
                outputs=[manage_output, bookmark_selector, bookmark_display_manage]
            )
            edit_category_button.click(
                edit_selected_bookmarks_category,
                inputs=[bookmark_selector, new_category],
                outputs=[manage_output, bookmark_selector, bookmark_display_manage]
            )
            export_button.click(
                export_bookmarks,
                outputs=download_link
            )

        logger.info("Launching Gradio app")
        demo.launch(debug=True)
    except Exception as e:
        logger.error(f"Error building the app: {e}")
        print(f"Error building the app: {e}")


if __name__ == "__main__":
    build_app()