import requests
from bs4 import BeautifulSoup
import re
from duckduckgo_search import DDGS

class WebScraper:
    def __init__(self):
        # DDGS is the DuckDuckGo search client from duckduckgo_search.
        self.ddgs = DDGS()

    def get_urls(self, query):
        """Return up to three result URLs for the query, or an empty list."""
        results = self.ddgs.text(query, max_results=3)
        return [result['href'] for result in results] if results else []

    def fetch_url(self, url):
        """Fetch a URL with browser-like headers; return the HTML text, or None on failure."""
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
                'Accept-Language': 'en-US,en;q=0.9',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
            }
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            return response.text
        except requests.exceptions.RequestException as e:
            print(f"Error fetching URL {url}: {e}")
            return None

    def get_text(self, data):
        """Extract visible text from HTML, collapse whitespace, and cap it at 4,000 characters."""
        soup = BeautifulSoup(data, 'html.parser')
        text = soup.get_text()
        cleaned_text = re.sub(r'\s+', ' ', text).strip()
        return cleaned_text[:4000]

    def scrape(self, query):
        """Search for the query and return cleaned text from the first URL that fetches successfully."""
        urls = self.get_urls(query)
        if not urls:
            return None

        for url in urls:
            data = self.fetch_url(url)
            if data:
                return self.get_text(data)
        return None
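
# A minimal usage sketch (not part of the original file): running the scraper
# end to end. The query string below is a hypothetical example; it only
# illustrates how scrape() ties get_urls, fetch_url, and get_text together.
if __name__ == '__main__':
    scraper = WebScraper()
    result = scraper.scrape('python web scraping tutorial')
    if result:
        print(result)
    else:
        print('No results could be fetched.')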