import os

import gradio as gr
import requests

BASE_URL = "https://api.jigsawstack.com/v1"
# The environment variable name is assumed; set it to your JigsawStack API key.
headers = {"x-api-key": os.getenv("JIGSAWSTACK_API_KEY")}

# ----------------- JigsawStack API Wrappers ------------------

def enhanced_ai_scrape(input_method, url, html, prompts_str, selector, page_pos):
    """Call the JigsawStack AI scrape endpoint and shape the result for the UI."""

    def error_response(message):
        # One status message plus five hidden output components
        return (
            message,
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
        )

    try:
        # Validate element prompts (the API accepts at most 5)
        prompts = [p.strip() for p in prompts_str.split(",") if p.strip()]
        if not prompts:
            return error_response("Error: No element prompts provided.")
        if len(prompts) > 5:
            return error_response("Error: Maximum 5 element prompts allowed.")

        # gr.Number passes a float (e.g. 2.0), so coerce defensively;
        # a plain str.isdigit() check would always fall back to 1
        try:
            page_number = max(1, int(float(page_pos)))
        except (TypeError, ValueError):
            page_number = 1

        payload = {
            "element_prompts": prompts,
            "root_element_selector": selector or "main",
            "page_position": page_number,
        }

        # Add URL or HTML based on the chosen input method
        if input_method == "URL":
            if not url or not url.strip():
                return error_response("Error: URL is required when using URL input method.")
            payload["url"] = url.strip()
        elif input_method == "HTML Content":
            if not html or not html.strip():
                return error_response("Error: HTML content is required when using HTML input method.")
            payload["html"] = html.strip()

        response = requests.post(f"{BASE_URL}/ai/scrape", headers=headers, json=payload)
        response.raise_for_status()
        result = response.json()

        if not result.get("success"):
            return error_response(f"Error: Scraping failed - {result.get('message', 'Unknown error')}")

        # Unpack the response
        context = result.get("context", {})
        selectors = result.get("selectors", {})
        data = result.get("data", [])
        links = result.get("link", [])
        current_page = result.get("page_position", 1)
        total_pages = result.get("page_position_length", 1)

        # Format pagination info
        pagination_text = f"Page {current_page} of {total_pages}"
        if total_pages > 1:
            pagination_text += f" (Total pages available: {total_pages})"

        status_text = f"✅ Successfully scraped {len(data)} data items"
        if context:
            status_text += f" with {len(context)} context elements"

        return (
            status_text,
            gr.update(value=context, visible=bool(context)),
            gr.update(value=selectors, visible=bool(selectors)),
            gr.update(value=data, visible=bool(data)),
            gr.update(value=links, visible=bool(links)),
            gr.update(value=pagination_text, visible=True),
        )

    except requests.exceptions.RequestException as req_err:
        return error_response(f"Request failed: {str(req_err)}")
    except Exception as e:
        return error_response(f"Unexpected error: {str(e)}")
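# ----------------- Direct API sketch (no UI) ------------------
# A minimal sketch of calling the /ai/scrape endpoint directly, mirroring the
# payload built in enhanced_ai_scrape() above. The page URL and prompts are
# placeholders, and RUN_SCRAPE_EXAMPLE is a hypothetical opt-in flag so this
# block stays inert unless explicitly enabled.
if os.getenv("RUN_SCRAPE_EXAMPLE"):
    example_payload = {
        "url": "https://example.com/pricing",             # placeholder URL
        "element_prompts": ["Plan title", "Plan price"],  # up to 5 prompts
        "root_element_selector": "main",                  # default scope
        "page_position": 1,                               # first page
    }
    example_resp = requests.post(
        f"{BASE_URL}/ai/scrape", headers=headers, json=example_payload
    )
    example_resp.raise_for_status()
    print(example_resp.json().get("data", []))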

# ----------------- Gradio UI ------------------

with gr.Blocks() as demo:
    gr.Markdown("""
# 🧩 JigsawStack AI Scraper

Extract structured data from web pages with advanced AI models.

For more details and API usage, see the documentation.
""") with gr.Row(): with gr.Column(): gr.Markdown("#### Input Method") input_method_scraper = gr.Radio( choices=["URL", "HTML Content"], label="Choose Input Method", value="URL" ) # Conditional inputs based on selection url_scraper = gr.Textbox( label="Page URL", placeholder="https://example.com/pricing", info="URL of the page to scrape" ) html_content = gr.Textbox( label="HTML Content", lines=8, placeholder="...", visible=False, info="Raw HTML content to scrape" ) gr.Markdown("#### Scraping Configuration") element_prompts = gr.Textbox( label="Element Prompts (comma-separated)", lines=3, placeholder="Plan title, Plan price, Features, Button text", info="Items to scrape (max 5). E.g., 'Plan price', 'Plan title'" ) root_selector = gr.Textbox( label="Root Element Selector", value="main", placeholder="main, .container, #content", info="CSS selector to limit scraping scope (default: main)" ) page_position = gr.Number( label="Page Position", value=1, minimum=1, info="For pagination, current page number (min: 1)" ) with gr.Column(): gr.Markdown("#### Results") scrape_status = gr.Textbox( label="Status", interactive=False, placeholder="Ready to scrape..." ) gr.Markdown("#### Extracted Data") context_output = gr.JSON( label="Context Data", visible=False ) selectors_output = gr.JSON( label="CSS Selectors Used", visible=False ) detailed_data = gr.JSON( label="Detailed Scrape Data", visible=False ) links_data = gr.JSON( label="Detected Links", visible=False ) gr.Markdown("#### Pagination Info") pagination_info = gr.Textbox( label="Page Information", interactive=False, visible=False ) scrape_btn = gr.Button("Scrape with AI", variant="primary") # Function to show/hide input groups based on selection def update_scraper_input_visibility(method): if method == "URL": return gr.Textbox(visible=True), gr.Textbox(visible=False) elif method == "HTML Content": return gr.Textbox(visible=False), gr.Textbox(visible=True) else: return gr.Textbox(visible=True), gr.Textbox(visible=False) input_method_scraper.change( update_scraper_input_visibility, inputs=input_method_scraper, outputs=[url_scraper, html_content] ) scrape_btn.click( enhanced_ai_scrape, inputs=[input_method_scraper, url_scraper, html_content, element_prompts, root_selector, page_position], outputs=[scrape_status, context_output, selectors_output, detailed_data, links_data, pagination_info] ) demo.launch()