Ammar Azman
committed on
Create scraper.py
Browse files- scraper.py +42 -0
scraper.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
from tqdm import tqdm
|
3 |
+
from bs4 import BeautifulSoup as BS
|
4 |
+
from xtractor.utils import dumps_the_json, jsonl_converter
|
5 |
+
|
6 |
+
def get_hrefs():
    """Collect article links from the gazetted-fatwa listing pages.

    Scrapes pages 1-10 of muftiselangor.gov.my's
    "keputusan-fatwa-diwartakan" listing and gathers the link of every
    post card on each page.

    Returns:
        list[str]: hrefs of all post cards found across the 10 pages,
        flattened into a single list.
    """
    all_hrefs = []
    for i in range(1, 11):
        # NOTE: the non-gazetted listing lives at
        # ".../keputusan-fatwa-tidak-diwartakan/{i}" if ever needed.
        url = f"https://www.muftiselangor.gov.my/keputusan-fatwa-diwartakan/{i}"
        # timeout prevents an unresponsive server from hanging the scrape forever
        response = requests.get(url, timeout=30)
        print(url, response.status_code)
        bs = BS(response.text, "lxml")
        div_all = bs.find_all("div", {"class": "elementor-post__card"})
        # each post card wraps its article link in the first <a href=...> tag
        hrefs = [x.find("a", href=True)["href"] for x in div_all]
        all_hrefs.append(hrefs)

    # flatten the per-page lists into one flat list of links
    all_hrefs_ = [y for x in all_hrefs for y in x]
    return all_hrefs_
|
20 |
+
|
21 |
+
def get_title_body(all_hrefs_):
    """Fetch each article page and extract its title and body text.

    Args:
        all_hrefs_: iterable of article URLs (as returned by ``get_hrefs``).

    Returns:
        dict: ``{"title": [...], "body": [...]}`` — two parallel lists with
        one entry per successfully parsed link.
    """
    full_data = {"title": [], "body": []}
    for link in tqdm(all_hrefs_):
        # derive a human-readable title from the URL slug,
        # e.g. ".../contoh-fatwa-baru/" -> "Contoh Fatwa Baru"
        title = " ".join(link.split("/")[-2].split("-")).title()
        # timeout prevents a dead link from hanging the whole run
        response = requests.get(link, timeout=30)
        bs = BS(response.text, "lxml")

        main = bs.find("main", class_="site-main")
        if main is None:
            # page missing or layout changed — skip this link instead of
            # crashing with AttributeError on main.text
            continue
        body = main.text.strip()

        full_data["title"].append(title)
        full_data["body"].append(body)

    return full_data
|
35 |
+
|
36 |
+
|
37 |
+
if __name__ == "__main__":
    all_hrefs = get_hrefs()
    # BUG FIX: the scraped links must be passed through — the original call
    # get_title_body() raised TypeError (missing required argument).
    full_data = get_title_body(all_hrefs)
    # persist as JSON, then convert to JSONL with one record per title/body pair
    dumps_the_json(content=full_data, json_file_name="fatwa_selangor_diwartakan.json")
    jsonl_converter(
        "fatwa_selangor_diwartakan.json",
        "fatwa_selangor_diwartakan.jsonl",
        "title",
        "body",
    )
|