#!/usr/bin/env python3
# License: MIT
# Copyright (C) 2024, Shinon.
# Code inspired by Ronsor Labs; their original code is licensed as below.
# License: AGPL 3.0
# Copyright (C) 2023, 2024 Ronsor Labs.

# Fetches page content for every page in a page-list JSONL via the MediaWiki parse API.
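#
# The record shapes below are a sketch inferred from the field accesses in
# this file, not an authoritative schema:
#   input line:  {"domain": <wiki host>, "path": <API path prefix>, "pages": [<page title>, ...]}
#   output line: {"domain": ..., "path": ..., "page": ..., "content": <parsed action=parse response>}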

import asyncio
import concurrent.futures as conc
import pathlib
import urllib.parse

import aiofile
import httpx
import orjson

# External helper (module not included here); assumed to return a fresh
# httpx.AsyncClient, presumably configured with proxy rotation.
from proxy_magic_session import get_async_session

CONCURRENT_WORKERS = 128

# JSON encoding/decoding is offloaded to worker processes so large payloads
# do not block the event loop.
executor = conc.ProcessPoolExecutor(max_workers=64)

pages_queue = asyncio.Queue(maxsize=1048576)
output_queue = asyncio.Queue(maxsize=int(CONCURRENT_WORKERS*1.5))
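# Pipeline: main() fills pages_queue, CONCURRENT_WORKERS HTMLWorker tasks drain
# it and push each parsed API response onto output_queue, and a single
# jsonl_writer task serialises those records to OUTPUT_JSONL. A None item is
# the shutdown signal on both queues.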

INPUT_JSONL = pathlib.Path("fandom_wikis_pages_210224_v2.jsonl")
OUTPUT_JSONL = pathlib.Path("fandom_wikis_pages_contents_210224_v2.jsonl")


async def retry_url(url: str):
    """POST `url` and return the response once its body parses as JSON;
    retries transport/HTTP errors and returns None when retries run out."""
    loop = asyncio.get_running_loop()
    session: httpx.AsyncClient = get_async_session()
    session.cookies.clear()
    session.headers[
        "user-agent"
    ] = "Mozilla/6.2 (compatible; Microsoft Chrome 137.0; Apple Gecko 47.0 in AOL Firefox 37.6) Google Toolbar/1.3"
    tries = 10
    data = None
    while tries > 0:
        try:
            data = await session.post(url, follow_redirects=True)
            # Retry most 3xx/4xx responses; 403 and 5xx fall through to the
            # JSON check below, and 410 means the page is permanently gone.
            if 300 <= data.status_code < 500 and data.status_code != 403:
                if data.status_code == 410:
                    break
                print(f"[W] RetryRequest | {url} {data.status_code}")
                tries -= 1
                continue
            # Only accept the response once its body parses as JSON.
            try:
                await loop.run_in_executor(executor, orjson.loads, data.content)
            except Exception:
                tries -= 1
                continue
            break
        except httpx.TransportError as e:
            await session.aclose()
            session: httpx.AsyncClient = get_async_session()
            print(f"[W] Retry TransportError {url} {e}")
            await asyncio.sleep(1)
            tries -= 1
        except httpx.HTTPError as e:
            print(f"[W] Uncaught Exception Retry... {url} | {e}")
            await session.aclose()
            session: httpx.AsyncClient = get_async_session()
            await asyncio.sleep(1)
            tries -= 1
        except Exception as e:
            print(f"[W] Uncaught Exception {url} | {e}")
            break
    await session.aclose()
    if tries <= 0:
        print(f"[W] Tries Exceeded {url}")
        return None
    return data


async def HTMLWorker():
    """Consume (domain, path, page) tuples from pages_queue and push parsed
    MediaWiki parse-API responses onto output_queue."""
    loop = asyncio.get_running_loop()
    while True:
        data = await pages_queue.get()
        if data is None:
            break
        domain, path, page = data
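        # Build a MediaWiki action=parse request returning the rendered HTML
        # ("text"), the raw wikitext, and link/category/template metadata.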
        query_params = {
            "action": "parse",
            "format": "json",
            "page": page,
            "prop": "text|langlinks|categories|links|templates|images|externallinks|sections|revid|displaytitle|iwlinks|properties|parsewarnings|wikitext",
        }
        print(f"[I] HTMLW | {domain} {page} query.")
        response = await retry_url(
            f"https://{domain}{path}api.php?{urllib.parse.urlencode(query_params)}"
        )
        if response and response.status_code == 200:
            print(f"[I] HTMLW | {domain} {page} dumped.")
            await output_queue.put(
                {
                    "domain": domain,
                    "path": path,
                    "page": page,
                    "content": await loop.run_in_executor(
                        executor, orjson.loads, response.content
                    ),
                }
            )


async def jsonl_writer():
    """Drain output_queue and append one JSON record per line to OUTPUT_JSONL."""
    loop = asyncio.get_running_loop()
    async with aiofile.async_open(OUTPUT_JSONL, "wb") as f:
        while True:
            dict_data: dict = await output_queue.get()
            if dict_data is None:
                break
            print(f"[I] Dump: {dict_data['domain']}{dict_data['path']}{dict_data['page']}")
            bytes_data = await loop.run_in_executor(executor, orjson.dumps, dict_data)
            await f.write(bytes_data)
            await f.write(b"\n")


async def main():
    loop = asyncio.get_running_loop()
    workers = [loop.create_task(HTMLWorker()) for _ in range(CONCURRENT_WORKERS)]
    writer = loop.create_task(jsonl_writer())
    with open(INPUT_JSONL, "rb") as f:
        f.readline()  # skip the first line of the input file
        for line in f:
            if line:
                domain_data = orjson.loads(line)
                page_count = len(domain_data["pages"])
                if page_count <= 5:
                    print(f"[I] Skip {domain_data['domain']} due to low page count.")
                    continue
                for page in domain_data["pages"]:
                    # domain_data["path"] carries a 5-character suffix that is
                    # stripped here; HTMLWorker appends "api.php" itself.
                    await pages_queue.put(
                        (domain_data["domain"], domain_data["path"][:-5], page)
                    )
        for _ in range(CONCURRENT_WORKERS):
            await pages_queue.put(None)
    # Poll worker completion instead of gathering, so progress can be reported.
    while True:
        done_workers = sum(worker.done() for worker in workers)
        if done_workers != CONCURRENT_WORKERS:
            print(f"{done_workers} / {CONCURRENT_WORKERS} workers completed.")
            await asyncio.sleep(60)
        else:
            break
    # await asyncio.gather(*workers)
    await output_queue.put(None)
    print("Sent shutdown to Jsonl writer.")
    await asyncio.gather(writer)

if __name__ == "__main__":
    asyncio.run(main())