Rsr2425 committed on
Commit 6c5c116 · 1 Parent(s): f2d71e3

Added initial web crawler code

Files changed (1)
  1. backend/app/crawler.py +92 -0
backend/app/crawler.py ADDED
@@ -0,0 +1,92 @@
+from spidy import crawler
+import os
+import re
+import logging
+from bs4 import BeautifulSoup
+from urllib.parse import urljoin, urlparse
+
+logger = logging.getLogger(__name__)
+
+
+class DomainCrawler:
+    def __init__(self, start_url, output_dir="crawled_content"):
+        self.start_url = start_url
+        self.domain = urlparse(start_url).netloc
+        self.output_dir = output_dir
+
+        # Create output directory if it doesn't exist
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            logger.info(f"Created output directory: {output_dir}")
+
+        # Initialize the crawler
+        self.crawler = crawler.Crawler(
+            start_url=start_url,
+            max_pages=1000,
+            timeout=10,
+            delay=0.5,
+            save_pages=True,
+            save_path=output_dir,
+            restrict_domain=True,
+            verbose=True,
+        )
+
+        # Set custom handlers
+        self.crawler.page_handler = self.process_page
+
+    def process_page(self, url, content):
+        """Custom page processor that extracts and saves content"""
+        try:
+            # Parse the HTML
+            soup = BeautifulSoup(content, "html.parser")
+
+            # Extract the title
+            title = soup.title.string if soup.title else "No Title"
+
+            # Clean the filename
+            filename = re.sub(r"[^\w\-_]", "_", title) + ".txt"
+            filepath = os.path.join(self.output_dir, filename)
+
+            # Extract main content (this is just an example - adjust for your site)
+            main_content = (
+                soup.find("main")
+                or soup.find("article")
+                or soup.find("div", class_="content")
+            )
+
+            # If we found main content, extract the text
+            if main_content:
+                text_content = main_content.get_text(separator="\n", strip=True)
+            else:
+                # Fallback to body text
+                text_content = (
+                    soup.body.get_text(separator="\n", strip=True)
+                    if soup.body
+                    else "No content"
+                )
+                logger.warning(
+                    f"No main content found for {url}, falling back to body text"
+                )
+
+            # Save the extracted content
+            with open(filepath, "w", encoding="utf-8") as f:
+                f.write(f"URL: {url}\n")
+                f.write(f"Title: {title}\n\n")
+                f.write(text_content)
+
+            logger.info(f"Saved content from {url} to {filepath}")
+
+        except Exception as e:
+            logger.error(f"Error processing {url}: {e}", exc_info=True)
+
+        return content  # Return the original content for the crawler to continue
+
+    def start(self):
+        """Start the crawling process"""
+        logger.info(f"Starting crawl from {self.start_url}")
+        self.crawler.crawl()
+
+        # Print summary
+        logger.info("\nCrawl completed!")
+        logger.info(f"Pages crawled: {len(self.crawler.links_crawled)}")
+        logger.info(f"Content saved to: {os.path.abspath(self.output_dir)}")
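A minimal usage sketch, not part of this commit, showing how DomainCrawler might be driven. It assumes the file is importable as backend.app.crawler, that spidy and beautifulsoup4 are installed and spidy's Crawler accepts the keyword arguments used above, and that https://example.com stands in for a real start URL:

    import logging

    from backend.app.crawler import DomainCrawler

    # Show the crawler's log messages on stdout (assumes no other logging config)
    logging.basicConfig(level=logging.INFO)

    # Crawl one domain and write extracted text files to ./crawled_content
    domain_crawler = DomainCrawler("https://example.com", output_dir="crawled_content")
    domain_crawler.start()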