Update scraper.py
scraper.py CHANGED (+16 -12)
@@ -97,16 +97,20 @@ class Scraper:
 
     @staticmethod
     async def scrape(url):
-        headers = {'User-Agent': 'Mozilla/5.0'}
-        response = requests.get(url)
-        soup = BeautifulSoup(response.content, 'html.parser')
-
-        title = Scraper.get_title(soup)
-        links = Scraper.get_links(soup)
-        text_content = Scraper.get_text_content(soup)
-
-        if not links:
-            print("Running alternative scrapper")
+        try:
+            headers = {'User-Agent': 'Mozilla/5.0'}
+            response = requests.get(url)
+            soup = BeautifulSoup(response.content, 'html.parser')
+
+            title = Scraper.get_title(soup)
+            links = Scraper.get_links(soup)
+            text_content = Scraper.get_text_content(soup)
+
+            if not links:
+                print("Running alternative scrapper")
+                links, text_content = await Scraper.power_scrapper_2(url)
+
+            return {"title": title, "URL": links, "Content": text_content}
+        except:
             links, text_content = await Scraper.power_scrapper_2(url)
-
-        return {"title": title, "URL": links, "Content": text_content}
+            return {"title": title, "URL": links, "Content": text_content}
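For reference, a minimal, hypothetical usage sketch of the updated method follows. It is not part of the commit: it assumes this scraper.py is importable as a module, that the Scraper class defines the helpers referenced in the diff (get_title, get_links, get_text_content, power_scrapper_2) together with the requests and BeautifulSoup imports, and that the URL is a placeholder. Because scrape() is an async static method, it has to be awaited, for example via asyncio.run().

import asyncio

from scraper import Scraper  # assumes this file is importable as scraper.py

async def main():
    # On the happy path this returns the dict built in the try block; if the
    # requests/BeautifulSoup pass raises or finds no links, scrape() falls
    # back to Scraper.power_scrapper_2(), as added in this commit.
    result = await Scraper.scrape("https://example.com")  # placeholder URL
    print(result["title"])
    print(len(result["URL"]), "links scraped")

if __name__ == "__main__":
    asyncio.run(main())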