Upload 5 files

Files changed:
- Dockerfile             +47 -0
- main.py                +126 -0
- requirements.txt       +0 -0 (binary)
- static/styles.css      +14 -0
- templates/index.html   +70 -0

Dockerfile
ADDED
@@ -0,0 +1,47 @@
# Use official Python image
FROM python:3.10-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Set working directory
WORKDIR /app

# Install system dependencies for Playwright
RUN apt-get update && apt-get install -y \
    wget \
    curl \
    gnupg \
    ca-certificates \
    fonts-liberation \
    libasound2 \
    libatk-bridge2.0-0 \
    libatk1.0-0 \
    libcups2 \
    libdbus-1-3 \
    libgdk-pixbuf2.0-0 \
    libnspr4 \
    libnss3 \
    libx11-xcb1 \
    libxcomposite1 \
    libxdamage1 \
    libxrandr2 \
    xdg-utils \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --upgrade pip && pip install -r requirements.txt

# Install Playwright browsers
RUN playwright install --with-deps

# Copy application files
COPY . .

# Expose port (Hugging Face Spaces expects the app on port 7860 by default)
EXPOSE 7860

# Command to run the app
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
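To sanity-check the image locally before pushing to the Space, a minimal build-and-run sketch looks like this (the web-scraper tag is an arbitrary placeholder; the Space itself builds the image automatically):

docker build -t web-scraper .
docker run --rm -p 7860:7860 web-scraper
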
main.py
ADDED
@@ -0,0 +1,126 @@
from fastapi import FastAPI, HTTPException, Request
from pydantic import HttpUrl
from playwright.async_api import async_playwright
from urllib.parse import urljoin, urlparse
import logging
from fastapi.responses import JSONResponse
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from typing import List, Dict
import asyncio

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

app = FastAPI(title="Website Scraper API (Enhanced for Images)")

# Mount static files
app.mount("/static", StaticFiles(directory="static"), name="static")

# Set up Jinja2 templates
templates = Jinja2Templates(directory="templates")

# Maximum number of pages to scrape
MAX_PAGES = 20

async def scrape_page(url: str, visited: set, base_domain: str) -> tuple[Dict, set]:
    """Scrape a single page for text, images, and links using Playwright."""
    try:
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=True)
            context = await browser.new_context(
                user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
                viewport={"width": 1280, "height": 720}
            )
            page = await context.new_page()

            # Navigate and wait for content
            await page.goto(url, wait_until="networkidle", timeout=30000)

            # Scroll to trigger lazy-loaded images
            await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
            await page.wait_for_timeout(2000)  # Wait for lazy-loaded content

            # Extract text content
            text_content = await page.evaluate(
                """() => document.body.innerText"""
            )
            text_content = ' '.join(text_content.split()) if text_content else ""

            # Extract images from src, data-src, and srcset
            images = await page.evaluate(
                """() => {
                    const imgElements = document.querySelectorAll('img');
                    const imgUrls = new Set();
                    imgElements.forEach(img => {
                        if (img.src) imgUrls.add(img.src);
                        if (img.dataset.src) imgUrls.add(img.dataset.src);
                        if (img.srcset) {
                            img.srcset.split(',').forEach(src => {
                                const url = src.trim().split(' ')[0];
                                if (url) imgUrls.add(url);
                            });
                        }
                    });
                    return Array.from(imgUrls);
                }"""
            )
            images = [urljoin(url, img) for img in images if img]

            # Extract links
            links = await page.evaluate(
                """() => Array.from(document.querySelectorAll('a')).map(a => a.href)"""
            )
            links = set(
                urljoin(url, link) for link in links
                if urlparse(urljoin(url, link)).netloc == base_domain
                and urljoin(url, link) not in visited
            )

            await browser.close()

            page_data = {
                "url": url,
                "text": text_content,
                "images": images
            }
            return page_data, links

    except Exception as e:
        logging.error(f"Error scraping {url}: {e}")
        return {}, set()

@app.get("/scrape")
async def crawl_website(url: HttpUrl):
    """Crawl the website starting from the given URL and return scraped data for up to MAX_PAGES pages as JSON."""
    try:
        visited = set()
        to_visit = {str(url)}
        base_domain = urlparse(str(url)).netloc
        results = []

        while to_visit and len(visited) < MAX_PAGES:
            current_url = to_visit.pop()
            if current_url in visited:
                continue

            logging.info(f"Scraping: {current_url}")
            visited.add(current_url)

            page_data, new_links = await scrape_page(current_url, visited, base_domain)
            if page_data:
                results.append(page_data)
                to_visit.update(new_links)

            # Small delay to avoid overwhelming the server
            await asyncio.sleep(0.5)

        return JSONResponse(content={"pages": results})

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Scraping failed: {str(e)}")

@app.get("/")
async def serve_home(request: Request):
    """Serve the frontend HTML page."""
    return templates.TemplateResponse("index.html", {"request": request})

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001)
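For reference, the /scrape endpoint can be exercised with a small standard-library client. This is only a sketch: it assumes the service is reachable at localhost:7860 (the port the Dockerfile exposes) and uses https://example.com as a placeholder target.

import json
import urllib.parse
import urllib.request

# Build the query string the endpoint expects (?url=...)
query = urllib.parse.urlencode({"url": "https://example.com"})  # placeholder target

# Call the running app and decode the JSON body
with urllib.request.urlopen(f"http://localhost:7860/scrape?{query}") as resp:
    data = json.load(resp)

# Response shape per main.py: {"pages": [{"url": ..., "text": ..., "images": [...]}, ...]}
for page in data["pages"]:
    print(page["url"], "-", len(page["images"]), "images")
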
requirements.txt
ADDED
Binary file (174 Bytes), not rendered in the diff.
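Since the requirements file itself is not shown, its exact contents are unconfirmed. Judging from the imports in main.py and the `playwright install` step in the Dockerfile, a plausible dependency list would be roughly:

# hypothetical reconstruction -- actual packages and version pins not verified
fastapi
uvicorn[standard]
jinja2
playwright
pydantic
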
static/styles.css
ADDED
@@ -0,0 +1,14 @@
textarea {
    width: 100%;
    min-height: 400px;
    max-height: 600px;
    overflow-y: auto;
    background-color: #f8fafc;
    padding: 1rem;
    border-radius: 0.5rem;
    border: 1px solid #e2e8f0;
    resize: vertical;
    font-family: monospace;
    font-size: 14px;
    box-sizing: border-box;
}
templates/index.html
ADDED
@@ -0,0 +1,70 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Web Scraper</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <link rel="stylesheet" href="/static/styles.css">
</head>
<body class="min-h-screen bg-gray-100 flex flex-col items-center p-4">
    <div class="w-full bg-white rounded-lg shadow-md p-6">
        <h1 class="text-2xl font-bold text-gray-800 mb-4">Web Scraper</h1>
        <form id="scrapeForm" class="flex flex-col space-y-4">
            <input
                type="url"
                id="urlInput"
                name="url"
                placeholder="Enter URL to scrape (e.g., https://example.com)"
                class="w-full p-2 border rounded focus:outline-none focus:ring-2 focus:ring-blue-500"
                required
            />
            <button
                type="submit"
                id="scrapeButton"
                class="w-full bg-blue-500 text-white p-2 rounded hover:bg-blue-600 disabled:bg-blue-300"
            >
                Scrape Website
            </button>
        </form>
        <div id="error" class="hidden mt-4 p-2 bg-red-100 text-red-700 rounded"></div>
        <div id="result" class="hidden mt-4 p-4 bg-gray-50 rounded border">
            <h2 class="text-lg font-semibold text-gray-700">Scraped Data</h2>
            <textarea id="jsonOutput" class="mt-2 text-sm text-gray-600" readonly></textarea>
        </div>
    </div>

    <script>
        document.getElementById('scrapeForm').addEventListener('submit', async (e) => {
            e.preventDefault();
            const url = document.getElementById('urlInput').value;
            const errorDiv = document.getElementById('error');
            const resultDiv = document.getElementById('result');
            const jsonOutput = document.getElementById('jsonOutput');
            const button = document.getElementById('scrapeButton');

            // Reset UI
            errorDiv.classList.add('hidden');
            resultDiv.classList.add('hidden');
            button.disabled = true;
            button.textContent = 'Scraping...';

            try {
                const response = await fetch(`/scrape?url=${encodeURIComponent(url)}`);
                if (!response.ok) {
                    throw new Error(`HTTP error! Status: ${response.status}`);
                }
                const data = await response.json();
                jsonOutput.value = JSON.stringify(data, null, 2);
                resultDiv.classList.remove('hidden');
            } catch (err) {
                errorDiv.textContent = `Error: ${err.message}`;
                errorDiv.classList.remove('hidden');
            } finally {
                button.disabled = false;
                button.textContent = 'Scrape Website';
            }
        });
    </script>
</body>
</html>