Update main.py
main.py CHANGED
@@ -39,54 +39,65 @@ async def get_data(url: str):
 
 # FastAPI route to scrape the website
 @app.get("/scrape")
-async def scrape_website(url):
+async def scrape_website(url: str):
     async with async_playwright() as p:
-        #
-        browser = await p.
-
-
+        # Launch browser in headless mode with custom args to bypass detection
+        browser = await p.chromium.launch(
+            headless=True,
+            args=[
+                "--disable-blink-features=AutomationControlled",  # Disable automation features
+                "--no-sandbox",
+                "--disable-dev-shm-usage",
+                "--disable-web-security",
+                "--disable-setuid-sandbox",
+                "--disable-features=IsolateOrigins,site-per-process"
+            ]
+        )
+
+        # Create a new browser context
         context = await browser.new_context(
             user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
         )
 
-        # Set additional headers to
+        # Set additional headers to mimic real browsing
         await context.set_extra_http_headers({
             "Accept-Language": "en-US,en;q=0.9",
             "Upgrade-Insecure-Requests": "1",
-            "
+            "Referer": "https://www.nasdaq.com"
         })
 
-        #
+        # Create a new page
         page = await context.new_page()
 
-        #
+        # Hide WebDriver and other automation-related properties
+        await page.add_init_script("""
+            Object.defineProperty(navigator, 'webdriver', {get: () => undefined});
+            window.navigator.chrome = { runtime: {} };
+            Object.defineProperty(navigator, 'plugins', {get: () => [1, 2, 3, 4, 5]});
+            Object.defineProperty(navigator, 'languages', {get: () => ['en-US', 'en']});
+        """)
+
+        # Block unnecessary resources (images, media, etc.)
         await page.route("**/*", lambda route: route.abort() if route.request.resource_type in ["image", "media", "stylesheet", "font", "xhr"] else route.continue_())
 
         try:
-            #
-            await asyncio.sleep(random.uniform(1,
-
-            # Navigate to the page with an extended timeout
+            # Navigate to the page with random delays
+            await asyncio.sleep(random.uniform(1, 5))  # Random delay
             await page.goto(url, wait_until='domcontentloaded', timeout=60000)
 
-            #
+            # Randomized mouse movement and scrolling to mimic human interaction
            await page.mouse.move(random.uniform(0, 100), random.uniform(0, 100))
             await page.mouse.wheel(0, random.uniform(200, 400))
-            await asyncio.sleep(random.uniform(1,
+            await asyncio.sleep(random.uniform(1, 5))  # Another random delay
+
             # Get the title of the page
             title = await page.title()
 
-            # Introduce a slight delay before fetching the links
-            await asyncio.sleep(random.uniform(1, 2))
-
             # Get all links on the page
             links = await page.evaluate("""() => {
                 return Array.from(document.querySelectorAll('a')).map(a => a.href);
             }""")
 
-            # Introduce another slight delay before fetching the content
-            await asyncio.sleep(random.uniform(1, 2))
-
             # Get page content (text from paragraphs and headers)
             content = await page.evaluate("""() => {
                 let elements = Array.from(document.querySelectorAll('body *'));
@@ -108,3 +119,4 @@ async def scrape_website(url):
         except Exception as e:
             return {"error": str(e)}
 
+
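The add_init_script call introduced above runs before any page script on every navigation, which is what lets the navigator.webdriver override land before the site's detection code executes. A minimal sketch of how the override could be verified, assuming the check is placed inside scrape_website right after the page is created; the check itself is hypothetical and not part of this commit:

            # Hypothetical check (not in the diff): confirm the init script
            # masked navigator.webdriver before scraping begins.
            is_webdriver = await page.evaluate("() => navigator.webdriver")
            assert is_webdriver is None  # Playwright maps JS `undefined` to Python None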
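For an end-to-end smoke test of the route, a sketch assuming the usual FastAPI layout (uvicorn main:app serving on port 8000) and the httpx client; neither appears in this diff, and since only the {"error": ...} branch of the response is visible above, the success payload's keys are an assumption:

    # Hypothetical smoke test for GET /scrape; start the server first with
    # `uvicorn main:app` (module and app names are assumed, not shown here).
    import httpx

    resp = httpx.get(
        "http://localhost:8000/scrape",
        params={"url": "https://www.nasdaq.com"},  # target hinted at by the Referer header above
        timeout=120.0,  # the route sleeps up to ~10 s and allows a 60 s page.goto timeout
    )
    print(resp.status_code)
    print(resp.json())  # likely includes title/links/content on success (assumed)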