# scraper.py
import asyncio

import requests
from bs4 import BeautifulSoup
from playwright.async_api import async_playwright
# URL of the page to scrape
url = "https://www.imf.org/en/News/Articles/2024/03/21/pr2494-sri-lanka-imf-staff-level-agreement-for-second-review-sla"

# Send a GET request to the URL
response = requests.get(url)

# Check if the request was successful
if response.status_code == 200:
    # Parse the page content
    soup = BeautifulSoup(response.content, 'html.parser')

    # Extract all text content (paragraphs, headers, etc.)
    elements = soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
    body_text = "\n".join([element.get_text().strip() for element in elements])

    # Extract all links
    links = []
    for a_tag in soup.find_all('a', href=True):
        links.append(a_tag['href'])

    # Print the extracted information
    print("Body Text:")
    print(body_text)
    print("\nLinks:")
    for link in links:
        print(link)
else:
    print(f"Failed to retrieve the webpage (status code {response.status_code})")
class Scraper:
    @staticmethod
    async def power_scrapper(url):
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=True)
            page = await browser.new_page()

            # Block everything except documents and scripts to speed up loading
            await page.route(
                "**/*",
                lambda route: route.continue_()
                if route.request.resource_type in ["document", "script"]
                else route.abort(),
            )

            # Open the target website
            await page.goto(url, wait_until='domcontentloaded')

            # Wait for a short time to give dynamic content a chance to load
            await page.wait_for_timeout(1000)

            # Extract all links
            links = await page.query_selector_all('a')
            page_url = []
            page_content = []
            for link in links:
                href = await link.get_attribute('href')
                page_url.append(href)

            # Extract all text content
            elements = await page.query_selector_all('body *')
            for element in elements:
                text_content = await element.text_content()
                if text_content and text_content.strip():
                    page_content.append(text_content.strip())

            await browser.close()
            return page_url, page_content
    @staticmethod
    def get_links(soup):
        links = []
        for link in soup.find_all('a'):
            href = link.get('href')
            links.append(href)
        return links

    @staticmethod
    def get_text_content(soup):
        text_elements = []
        for tag in ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'span']:
            elements = soup.find_all(tag)
            for element in elements:
                text_elements.append(element.get_text())
        return text_elements
    @staticmethod
    def get_title(soup):
        # Guard against pages that have no <title> tag
        title_tag = soup.find('title')
        return title_tag.get_text() if title_tag else ""
    @staticmethod
    async def scrape(url):
        headers = {'User-Agent': 'Mozilla/5.0'}
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content, 'html.parser')

        title = Scraper.get_title(soup)
        links = Scraper.get_links(soup)
        text_content = Scraper.get_text_content(soup)

        # Fall back to the Playwright-based scraper when the static HTML yields no links
        if not links:
            print("Running alternative scraper")
            links, text_content = await Scraper.power_scrapper(url)

        return {"title": title, "URL": links, "Content": text_content}
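
# Minimal usage sketch: Scraper.scrape is a coroutine, so it has to be driven by an
# event loop, e.g. with asyncio.run(). The URL below is only an illustrative placeholder.
if __name__ == "__main__":
    result = asyncio.run(Scraper.scrape("https://example.com"))
    print(result["title"])
    print(f"{len(result['URL'])} links, {len(result['Content'])} text blocks")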