Update app.py
app.py (CHANGED)
Removed lines (old implementation), grouped by diff hunk; only the lines that remain legible in this extract are shown.

@@ -6,6 +6,13 @@ import json

@@ -48,69 +55,206 @@ Your role is to act as an intelligent browser and data interpreter – able to r
-        """Scrape webpage content"""
-            text_content = soup.get_text()
-            # Clean up text
-            text_content = ' '.join(chunk for chunk in chunks if chunk)
-            # Extract tables
-            for table in soup.find_all('table'):
-                # Extract rows
-                    if row_data:
-                        table_data.append(row_data)
-                    'data': table_data

@@ -125,23 +269,36 @@ Your role is to act as an intelligent browser and data interpreter – able to r

@@ -152,7 +309,7 @@ Tables Found: {len(scraped_data['tables'])}

@@ -161,7 +318,7 @@ Tables Found: {len(scraped_data['tables'])}
-            return f"Error analyzing content: {str(e)}"

@@ -176,22 +333,29 @@ def create_interface():
-            yield f"{scraped_data['error']}"
-        yield f"Successfully scraped webpage!\nTitle: {scraped_data['title']}\nFound {len(scraped_data['tables'])} tables\n\nAnalyzing content with DeepSeek V3..."
-        yield f"Analysis Complete!\n\n{result}"

@@ -199,7 +363,7 @@ def create_interface():
-    Extract and analyze web content using advanced AI.

@@ -213,51 +377,56 @@ def create_interface():
-                    placeholder="https://example.com",
-                    clear_btn = gr.Button("Clear", variant="secondary")
-                    interactive=False
-        # Example websites
-        with gr.Accordion("Try These Example URLs", open=False):
-            examples = [
-                ["https://www.imf.org/en/Publications/WEO", "Extract economic outlook summary and GDP projections"],
-                ["https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(nominal)", "Create a table of top 10 countries by GDP"],
-                ["https://www.who.int/news", "Summarize the latest health news"],
-                ["https://www.nasdaq.com/market-activity/stocks", "Extract stock market data and trends"]
-            ]

@@ -271,26 +440,6 @@ def create_interface():
-        # Auto-fill example
-        def fill_example():
-            return (
-                "",  # API key remains empty
-                "https://www.imf.org/en/Publications/WEO/Issues/2024/04/16/world-economic-outlook-april-2024",
-                """1. Extract a summary of the main economic outlook from this page.
-2. Extract any available tables or figures with global GDP growth projections.
-3. Create a new table showing:
-   - Country/Region
-   - Projected GDP Growth (2024)
-   - Change from Previous Forecast (if available)
-4. Highlight the top 3 fastest-growing economies in a separate mini-table."""
-            )
-
-        example_btn = gr.Button("Load IMF Example", variant="secondary")
-        example_btn.click(
-            fn=fill_example,
-            outputs=[url_input, query_input]
-        )

@@ -298,7 +447,7 @@ if __name__ == "__main__":
-        share=True
New version of each changed region. Lines added by this commit are prefixed with "+"; the remaining lines are unchanged context.

@@ -6,6 +6,13 @@ import json
 import re
 from urllib.parse import urljoin, urlparse
 import time
+import urllib3
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
+import ssl
+
+# Disable SSL warnings
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 
 class WebScrapingTool:
     def __init__(self):

@@ -48,69 +55,206 @@ Your role is to act as an intelligent browser and data interpreter – able to r
         except Exception as e:
             return False, f"Failed to initialize API client: {str(e)}"
 
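The disable_warnings() call above silences the InsecureRequestWarning that urllib3 emits whenever a request is made with verify=False, as scrape_webpage() does further down (the added ssl import is not used in the hunks shown). A narrower alternative, shown only as a sketch and not part of this commit, would suppress the warning just around the insecure request:

# Sketch: scope the suppression instead of disabling the warning globally.
import warnings
import requests
import urllib3

def fetch_insecure(url, timeout=15):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", urllib3.exceptions.InsecureRequestWarning)
        # verify=False skips TLS certificate checks, which is what triggers the warning
        return requests.get(url, timeout=timeout, verify=False)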
+    def create_session(self):
+        """Create a robust session with retry strategy and proper headers"""
+        session = requests.Session()
+
+        # Define retry strategy
+        retry_strategy = Retry(
+            total=3,
+            status_forcelist=[429, 500, 502, 503, 504],
+            method_whitelist=["HEAD", "GET", "OPTIONS"],
+            backoff_factor=1
+        )
+
+        # Mount adapter with retry strategy
+        adapter = HTTPAdapter(max_retries=retry_strategy)
+        session.mount("http://", adapter)
+        session.mount("https://", adapter)
+
+        # Set comprehensive headers to mimic real browser
+        session.headers.update({
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Accept-Encoding': 'gzip, deflate, br',
+            'DNT': '1',
+            'Connection': 'keep-alive',
+            'Upgrade-Insecure-Requests': '1',
+            'Sec-Fetch-Dest': 'document',
+            'Sec-Fetch-Mode': 'navigate',
+            'Sec-Fetch-Site': 'none',
+            'Sec-Fetch-User': '?1',
+            'Cache-Control': 'max-age=0'
+        })
+
+        return session
+
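One caveat with the block above: Retry's method_whitelist argument was deprecated in urllib3 1.26 and removed in 2.0, where it is called allowed_methods, so this code raises a TypeError on newer urllib3 releases. A version-tolerant sketch (not part of this commit) could look like:

# Sketch: build the same retry policy on both old and new urllib3 versions.
from urllib3.util.retry import Retry

def build_retry_strategy():
    kwargs = dict(total=3, status_forcelist=[429, 500, 502, 503, 504], backoff_factor=1)
    try:
        # urllib3 >= 1.26 uses allowed_methods
        return Retry(allowed_methods=["HEAD", "GET", "OPTIONS"], **kwargs)
    except TypeError:
        # older urllib3 only understands method_whitelist
        return Retry(method_whitelist=["HEAD", "GET", "OPTIONS"], **kwargs)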
     def scrape_webpage(self, url):
+        """Scrape webpage content with enhanced error handling and timeouts"""
         try:
+            session = self.create_session()
+
+            # Multiple timeout attempts with increasing duration
+            timeout_attempts = [15, 30, 45]
+
+            for timeout in timeout_attempts:
+                try:
+                    print(f"Attempting to fetch {url} with {timeout}s timeout...")
+
+                    response = session.get(
+                        url,
+                        timeout=timeout,
+                        verify=False,  # Disable SSL verification for problematic sites
+                        allow_redirects=True,
+                        stream=False
+                    )
+
+                    response.raise_for_status()
+                    break
+
+                except requests.exceptions.Timeout:
+                    if timeout == timeout_attempts[-1]:  # Last attempt
+                        return {
+                            'success': False,
+                            'error': f"Connection timed out after multiple attempts. The website may be slow or blocking automated requests."
+                        }
+                    continue
+                except requests.exceptions.SSLError:
+                    # Try with different SSL context
+                    try:
+                        response = session.get(
+                            url,
+                            timeout=timeout,
+                            verify=False,
+                            allow_redirects=True
+                        )
+                        response.raise_for_status()
+                        break
+                    except:
+                        continue
+
+            # Check if we got a response
+            if 'response' not in locals():
+                return {
+                    'success': False,
+                    'error': "Failed to establish connection after multiple attempts"
+                }
 
+            # Check content type
+            content_type = response.headers.get('content-type', '').lower()
+            if 'text/html' not in content_type and 'text/plain' not in content_type:
+                return {
+                    'success': False,
+                    'error': f"Invalid content type: {content_type}. Expected HTML content."
+                }
 
+            # Parse HTML content
             soup = BeautifulSoup(response.content, 'html.parser')
 
+            # Remove unwanted elements
+            for element in soup(["script", "style", "nav", "footer", "header", "aside", "noscript", "iframe"]):
+                element.decompose()
+
+            # Remove elements with common ad/tracking classes
+            ad_classes = ['ad', 'advertisement', 'banner', 'popup', 'modal', 'cookie', 'newsletter']
+            for class_name in ad_classes:
+                for element in soup.find_all(class_=re.compile(class_name, re.I)):
+                    element.decompose()
 
             # Extract text content
+            text_content = soup.get_text(separator=' ', strip=True)
 
+            # Clean up text - remove extra whitespace
+            text_content = re.sub(r'\s+', ' ', text_content)
+            text_content = text_content.strip()
 
+            # Extract tables with improved structure
             tables = []
+            for i, table in enumerate(soup.find_all('table')):
                 table_data = []
                 headers = []
 
+                # Try to find headers in various ways
+                header_row = table.find('thead')
+                if header_row:
+                    header_row = header_row.find('tr')
+                else:
+                    header_row = table.find('tr')
+
                 if header_row:
+                    headers = []
+                    for th in header_row.find_all(['th', 'td']):
+                        header_text = th.get_text(strip=True)
+                        headers.append(header_text if header_text else f"Column_{len(headers)+1}")
 
+                # Extract all rows (skip header if it was already processed)
+                rows = table.find_all('tr')
+                start_idx = 1 if header_row and header_row in rows else 0
 
+                for row in rows[start_idx:]:
+                    cells = row.find_all(['td', 'th'])
+                    if cells:
+                        row_data = []
+                        for cell in cells:
+                            cell_text = cell.get_text(strip=True)
+                            row_data.append(cell_text)
+
+                        if row_data and any(cell.strip() for cell in row_data):  # Skip empty rows
+                            table_data.append(row_data)
+
+                if table_data:
+                    # Ensure headers match data columns
+                    max_cols = max(len(row) for row in table_data) if table_data else 0
+                    if len(headers) < max_cols:
+                        headers.extend([f"Column_{i+1}" for i in range(len(headers), max_cols)])
+                    elif len(headers) > max_cols:
+                        headers = headers[:max_cols]
+
                 tables.append({
+                    'id': i + 1,
                     'headers': headers,
+                    'data': table_data[:50]  # Limit rows to prevent overwhelming
                 })
 
+            # Extract metadata
+            title = soup.title.string.strip() if soup.title and soup.title.string else "No title found"
+
+            # Extract meta description
+            meta_desc = ""
+            desc_tag = soup.find('meta', attrs={'name': 'description'})
+            if desc_tag and desc_tag.get('content'):
+                meta_desc = desc_tag['content'].strip()
+
             return {
                 'success': True,
+                'text': text_content[:20000],  # Limit text length
                 'tables': tables,
+                'title': title,
+                'meta_description': meta_desc,
+                'url': url,
+                'content_length': len(text_content)
             }
 
+        except requests.exceptions.ConnectionError as e:
             return {
                 'success': False,
+                'error': f"Connection failed: {str(e)}. The website may be down or blocking requests."
+            }
+        except requests.exceptions.HTTPError as e:
+            return {
+                'success': False,
+                'error': f"HTTP Error {e.response.status_code}: {e.response.reason}"
+            }
+        except requests.exceptions.RequestException as e:
+            return {
+                'success': False,
+                'error': f"Request failed: {str(e)}"
             }
         except Exception as e:
             return {
                 'success': False,
+                'error': f"Unexpected error while processing webpage: {str(e)}"
             }
 
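The dictionary returned above is what the Gradio callback and analyze_content() consume further down. A minimal usage sketch (not part of the commit; the URL is just an example):

# Sketch: consuming the result dictionary produced by scrape_webpage().
tool = WebScrapingTool()
result = tool.scrape_webpage("https://example.com")

if result['success']:
    print(result['title'], '-', result['content_length'], 'characters')
    for table in result['tables']:
        print(f"Table {table['id']}: {len(table['data'])} rows |", " | ".join(table['headers']))
else:
    print("Scrape failed:", result['error'])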
     def analyze_content(self, scraped_data, user_query, api_key):

@@ -125,23 +269,36 @@ Your role is to act as an intelligent browser and data interpreter – able to r
 
         # Prepare content for AI analysis
         content_text = f"""
+WEBPAGE ANALYSIS REQUEST
+========================
+
+URL: {scraped_data['url']}
 Title: {scraped_data['title']}
+Content Length: {scraped_data['content_length']} characters
+Tables Found: {len(scraped_data['tables'])}
 
+META DESCRIPTION:
+{scraped_data['meta_description']}
 
+MAIN CONTENT:
+{scraped_data['text']}
         """
 
         if scraped_data['tables']:
+            content_text += f"\n\nSTRUCTURED DATA - {len(scraped_data['tables'])} TABLE(S) FOUND:\n"
+            content_text += "=" * 50 + "\n"
+
+            for table in scraped_data['tables']:
+                content_text += f"\nTABLE {table['id']}:\n"
+                content_text += f"Headers: {' | '.join(table['headers'])}\n"
+                content_text += "-" * 50 + "\n"
+
+                for i, row in enumerate(table['data'][:10]):  # Show first 10 rows
+                    content_text += f"Row {i+1}: {' | '.join(str(cell) for cell in row)}\n"
+
+                if len(table['data']) > 10:
+                    content_text += f"... and {len(table['data']) - 10} more rows\n"
+                content_text += "\n"
 
         try:
             completion = self.client.chat.completions.create(

@@ -152,7 +309,7 @@ Tables Found: {len(scraped_data['tables'])}
                 model="deepseek/deepseek-chat-v3-0324:free",
                 messages=[
                     {"role": "system", "content": self.system_prompt},
+                    {"role": "user", "content": f"{content_text}\n\nUSER REQUEST:\n{user_query}\n\nPlease analyze the above webpage content and fulfill the user's request. Be thorough and accurate."}
                 ],
                 temperature=0.1,
                 max_tokens=4000

@@ -161,7 +318,7 @@ Tables Found: {len(scraped_data['tables'])}
             return completion.choices[0].message.content
 
         except Exception as e:
+            return f"Error analyzing content with AI: {str(e)}"
 
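The completions call above goes through self.client, which is initialized elsewhere in the file and is not shown in this diff. Given the OpenRouter model id and the chat.completions interface, it is presumably an OpenAI-compatible client pointed at OpenRouter; a hedged sketch of that assumption:

# Sketch (assumption, not shown in the diff): OpenAI SDK client against OpenRouter.
from openai import OpenAI

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",  # OpenRouter's OpenAI-compatible endpoint
    api_key=api_key,  # the user-supplied OpenRouter API key
)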
 def create_interface():
     tool = WebScrapingTool()

@@ -176,22 +333,29 @@ def create_interface():
         if not user_query.strip():
             return "Please enter your analysis query"
 
+        # Validate URL format
+        if not url.startswith(('http://', 'https://')):
+            url = 'https://' + url
+
         # Add progress updates
+        yield "Initializing web scraper..."
+        time.sleep(0.5)
+
+        yield "Fetching webpage content (this may take a moment)..."
 
         # Scrape webpage
         scraped_data = tool.scrape_webpage(url)
 
         if not scraped_data['success']:
+            yield f"Scraping Failed: {scraped_data['error']}"
             return
 
+        yield f"Successfully scraped webpage!\nTitle: {scraped_data['title']}\nFound {len(scraped_data['tables'])} tables\nContent: {scraped_data['content_length']} characters\n\nAnalyzing content with DeepSeek V3..."
 
         # Analyze content
         result = tool.analyze_content(scraped_data, user_query, api_key)
 
+        yield f"Analysis Complete!\n{'='*50}\n\n{result}"
 
     # Create Gradio interface
     with gr.Blocks(title="AI Web Scraping Tool", theme=gr.themes.Soft()) as app:

@@ -199,7 +363,7 @@ def create_interface():
         # AI Web Scraping Tool
         ### Powered by DeepSeek V3 & OpenRouter
 
+        Extract and analyze web content using advanced AI. The tool handles timeouts, SSL issues, and provides robust scraping capabilities.
         """)
 
         with gr.Row():

@@ -213,51 +377,56 @@ def create_interface():
 
                 url_input = gr.Textbox(
                     label="Website URL",
+                    placeholder="https://example.com or just example.com",
                     info="Enter the URL you want to scrape and analyze"
                 )
 
                 query_input = gr.Textbox(
                     label="Analysis Query",
                     placeholder="What do you want to extract? (e.g., 'Extract main points and create a summary table')",
+                    lines=4,
                     info="Describe what information you want to extract from the webpage"
                 )
 
                 with gr.Row():
                     analyze_btn = gr.Button("Analyze Website", variant="primary", size="lg")
+                    clear_btn = gr.Button("Clear All", variant="secondary")
 
             with gr.Column(scale=3):
                 output = gr.Textbox(
                     label="Analysis Results",
+                    lines=25,
+                    max_lines=40,
                     show_copy_button=True,
+                    interactive=False,
+                    placeholder="Results will appear here after analysis..."
                 )
 
+        # Tips and Examples
+        with gr.Accordion("Usage Tips & Examples", open=False):
+            gr.Markdown("""
+            ### Example Analysis Queries:
+            - **Data Extraction**: *"Extract all numerical data and organize it in a table format"*
+            - **Content Summary**: *"Summarize the main points in bullet format with key statistics"*
+            - **Table Processing**: *"Find all tables and convert them to a single consolidated format"*
+            - **Specific Information**: *"Extract contact information, prices, or product details"*
+            - **Comparison**: *"Compare different items/options mentioned and create a comparison table"*
+
+            ### Technical Notes:
+            - **Multiple Timeouts**: Tool tries 15s, 30s, then 45s timeouts automatically
+            - **SSL Handling**: Bypasses SSL issues for problematic websites
+            - **Content Filtering**: Removes ads, popups, and unnecessary elements
+            - **Table Detection**: Automatically finds and structures tabular data
+            - **Error Recovery**: Handles connection issues and provides clear error messages
+
+            ### Works Well With:
+            - News websites (BBC, CNN, Reuters)
+            - Government sites (IMF, WHO, official statistics)
+            - Wikipedia and educational content
+            - E-commerce product pages
+            - Financial data sites (Yahoo Finance, MarketWatch)
+            - Research papers and academic sites
+            """)
 
         # Event handlers
         analyze_btn.click(

@@ -271,26 +440,6 @@ def create_interface():
             fn=lambda: ("", "", "", ""),
             outputs=[api_key_input, url_input, query_input, output]
         )
 
     return app
 

@@ -298,7 +447,7 @@ if __name__ == "__main__":
     # Create and launch the app
     app = create_interface()
 
+    # Launch with enhanced configuration
     app.launch(
+        share=True
     )
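Since this app runs as a Hugging Face Space, the public link produced by share=True mainly matters for local runs and is generally unnecessary on Spaces, so a plain launch would also work. A hedged alternative, using standard gr.Blocks.launch() options (not part of the commit):

# Sketch: explicit launch configuration for a Space-like deployment.
app.launch(
    server_name="0.0.0.0",  # listen on all interfaces
    server_port=7860,       # the port Gradio and Spaces use by default
    show_error=True,        # surface tracebacks in the UI while debugging
)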