KaraKaraWitch committed

Commit fb08e0a · 0 Parent(s)

Initial Commit

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +58 -0
  2. FandomWaifu.png +3 -0
  3. README.md +164 -0
  4. Scripts/HTML2Markdown.py +451 -0
  5. Scripts/IndexFandomPages.py +241 -0
  6. Scripts/RobloxWikiFilter.py +111 -0
  7. Scripts/WikiPageFetcher.py +157 -0
  8. Scripts/WikisIndexer.py +70 -0
  9. data/Fandom-v0.5.jsonl +3 -0
  10. v2.5-chunks-roblox-filter/fandom-aa.jsonl +3 -0
  11. v2.5-chunks-roblox-filter/fandom-ab.jsonl +3 -0
  12. v2.5-chunks-roblox-filter/fandom-ac.jsonl +3 -0
  13. v2.5-chunks-roblox-filter/fandom-ad.jsonl +3 -0
  14. v2.5-chunks-roblox-filter/fandom-ae.jsonl +3 -0
  15. v2.5-chunks-roblox-filter/fandom-af.jsonl +3 -0
  16. v2.5-chunks-roblox-filter/fandom-ag.jsonl +3 -0
  17. v2.5-chunks-roblox-filter/fandom-ah.jsonl +3 -0
  18. v2.5-chunks-roblox-filter/fandom-ai.jsonl +3 -0
  19. v2.5-chunks-roblox-filter/fandom-aj.jsonl +3 -0
  20. v2.5-chunks-roblox-filter/fandom-ak.jsonl +3 -0
  21. v2.5-chunks-roblox-filter/fandom-al.jsonl +3 -0
  22. v2.5-chunks-roblox-filter/fandom-am.jsonl +3 -0
  23. v2.5-chunks-roblox-filter/fandom-an.jsonl +3 -0
  24. v2.5-chunks-roblox-filter/fandom-ao.jsonl +3 -0
  25. v2.5-chunks-roblox-filter/fandom-ap.jsonl +3 -0
  26. v2.5-chunks-roblox-filter/fandom-aq.jsonl +3 -0
  27. v2.5-chunks-roblox-filter/fandom-ar.jsonl +3 -0
  28. v2.5-chunks-roblox-filter/fandom-as.jsonl +3 -0
  29. v2.5-chunks-roblox-filter/fandom-at.jsonl +3 -0
  30. v2.5-chunks-roblox-filter/fandom-au.jsonl +3 -0
  31. v2.5-chunks-roblox-filter/fandom-av.jsonl +3 -0
  32. v2.5-chunks-roblox-filter/fandom-aw.jsonl +3 -0
  33. v2.5-chunks-roblox-filter/fandom-ax.jsonl +3 -0
  34. v2.5-chunks-roblox-filter/fandom-ay.jsonl +3 -0
  35. v2.5-chunks-roblox-filter/fandom-az.jsonl +3 -0
  36. v2.5-chunks-roblox-filter/fandom-ba.jsonl +3 -0
  37. v2.5-chunks-roblox-filter/fandom-bb.jsonl +3 -0
  38. v2.5-chunks-roblox-filter/fandom-bc.jsonl +3 -0
  39. v2.5-chunks-roblox-filter/fandom-bd.jsonl +3 -0
  40. v2.5-chunks-roblox-filter/fandom-be.jsonl +3 -0
  41. v2.5-chunks-roblox-filter/fandom-bf.jsonl +3 -0
  42. v2.5-chunks-roblox-filter/fandom-bg.jsonl +3 -0
  43. v2.5-chunks-roblox-filter/fandom-bh.jsonl +3 -0
  44. v2.5-chunks-roblox-filter/fandom-bi.jsonl +3 -0
  45. v2.5-chunks-roblox-filter/fandom-bj.jsonl +3 -0
  46. v2.5-chunks-roblox-filter/fandom-bk.jsonl +3 -0
  47. v2.5-chunks-roblox-filter/fandom-bl.jsonl +3 -0
  48. v2.5-chunks-roblox-filter/fandom-bm.jsonl +3 -0
  49. v2.5-chunks-roblox-filter/fandom-bn.jsonl +3 -0
  50. v2.5-chunks-roblox-filter/fandom-bo.jsonl +3 -0
.gitattributes ADDED
@@ -0,0 +1,58 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
56
+
57
+
58
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
FandomWaifu.png ADDED

Git LFS Details

  • SHA256: 28c7d0bf6b483d6135308bf4c530e72dfab85973824702f6020475355f2d7f3d
  • Pointer size: 131 Bytes
  • Size of remote file: 750 kB
README.md ADDED
@@ -0,0 +1,164 @@
1
+ ---
2
+ annotations_creators:
3
+ - no-annotation
4
+ language_creators:
5
+ - crowdsourced
6
+ license:
7
+ - cc-by-sa-4.0
8
+ task_categories:
9
+ - text-generation
10
+ - fill-mask
11
+ task_ids:
12
+ - language-modeling
13
+ - masked-language-modeling
14
+ source_datasets:
15
+ - original
16
+ language:
17
+ - en
18
+
19
+ configs:
20
+ - config_name: default
21
+ data_files:
22
+ - split: final
23
+ path: "data/Fandom-v0.5.jsonl"
24
+ - split: raw-pre-roblox
25
+ path: "v2.5-chunks/*.jsonl"
26
+ - split: raw-post-roblox
27
+ path: "v2.5-chunks-roblox-filter/*.jsonl"
28
+
29
+ pretty_name: Fanatic Fandom
30
+ ---
31
+
32
+ # Dataset Card for Fanatic Fandom
33
+
34
+ ![](FandomWaifu.png "SD-generated image styled in the spirit of Fandom's logo")
35
+
36
+ *Waifu to catch your attention.*
37
+
38
+ ## Dataset Details
39
+
40
+ ### Dataset Description
41
+
42
+ *Fanatic Fandom* is a cleaned dataset built from a raw scrape of Fandom wikis. We indexed every publicly available wiki and crawled each of its pages.
+ The filtered result totals **~7.43B** tokens (llama-2-7b-chat tokenizer) / **~6.27B** tokens (RWKV tokenizer), primarily in English.
44
+
45
+ - **Curated by:** KaraKaraWitch
46
+ - **Funded by [optional]:** Recursal.ai (I work there lol)
47
+ - **Shared by [optional]:** KaraKaraWitch
48
+ - **Language(s) (NLP):** Primarily English
49
+ - **License:** cc-by-sa-4.0
50
+
51
+ ### Dataset Sources [optional]
52
+
53
+ - **Source Data:** [https://fandom.com/](https://fandom.com/) (Bot Crawled.)
54
+
55
+ ### Processing and Filtering
56
+
57
+ We detail below the steps involved in scraping, indexing, and cleaning fandom wikis down to the HTML content files. Here's a breakdown of the process:
58
+
59
+ 1. **Wiki Identification:**
60
+ - The `WikisIndexer.py` script retrieves a list of wikis from `https://community.fandom.com/Special:NewWikis`.
61
+
62
+ 2. **Page Indexing:**
63
+ - The `IndexFandomPages.py` script walks each wiki's `Special:AllPages` listing to gather a list of pages for each wiki.
64
+
65
+ 3. **Page Fetching:**
66
+ - The `WikiPageFetcher.py` script utilizes the MediaWiki API (`api.php?action=parse`) to render each wiki page and saves the result to a large JSONL file (see the sketch after this list).
+ - Additionally, any wiki with five or fewer pages is skipped, as it is assumed to be low quality.
68
+
69
+ 4. **Data Chunking:**
70
+ - A single large JSONL file containing all fetched pages is split into smaller, more manageable chunks.
71
+ - This is in preparation for the next step (Roblox wiki removal).
72
+
73
+ 5. **Roblox Wiki Removal:**
74
+ - The `RobloxWikiFilter.py` script identifies and removes Roblox wikis due to the high volume of low-quality content they often generate. This filtering step simplifies the subsequent stub article removal process.
75
+ - From quick napkin math (comparing the Step 3 and Step 4 results), around 15.2% of fandom wikis are Roblox data.
76
+
77
+ 6. **Content Transformation:**
78
+ - HTML content is converted to Markdown format. The conversion process removes unnecessary elements like figures, stub article notices, and other irrelevant data.
79
+
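+ As a rough illustration of step 3, here is a minimal, synchronous sketch of fetching the rendered HTML of one page through the MediaWiki `api.php` endpoint. The real `WikiPageFetcher.py` is asynchronous, rotates proxied `httpx` sessions, retries failures, and requests many more `prop` fields; the domain and page in the usage comment are placeholders, not taken from the dataset.
+
+ ```python
+ import requests
+
+
+ def fetch_page_html(domain: str, page: str, path: str = "/") -> str:
+     """Fetch the parsed HTML of a single wiki page via api.php (minimal sketch)."""
+     params = {
+         "action": "parse",
+         "format": "json",
+         "page": page,
+         "prop": "text",
+     }
+     resp = requests.get(f"https://{domain}{path}api.php", params=params, timeout=30)
+     resp.raise_for_status()
+     # The rendered page HTML lives under parse -> text -> "*".
+     return resp.json()["parse"]["text"]["*"]
+
+
+ # Placeholder usage: html = fetch_page_html("community.fandom.com", "Help:Contents")
+ ```
+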
80
+ **Note:** Due to the passage of time (approximately 3 months as of May 6, 2024), the specific details of the crawling process may be a little hazy. The primary challenge encountered was the significant time required to complete the crawling operation.
81
+
82
+ ### Data Splits
83
+
84
+ There are 3 splits for this dataset (a loading sketch follows the list):
85
+
86
+ - final
87
+ - Contains the final 25GB jsonl file.
88
+ - You probably want this for training.
89
+ - raw-pre-roblox
90
+ - Raw files, **before** Roblox filtering.
91
+ - Use this if you want to start from scratch and don't want to crawl fandom again.
92
+ - raw-post-roblox
93
+ - Raw files, **after** Roblox filtering.
94
+ - Roblox wikis removed.
95
+ - Use this if you want to start from scratch and don't want to crawl fandom again.
96
+
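+ A minimal loading sketch using the 🤗 `datasets` library; the repository id below is a placeholder for wherever this dataset is hosted:
+
+ ```python
+ from datasets import load_dataset
+
+ # Placeholder repo id; point this at the actual dataset repository.
+ ds = load_dataset("your-namespace/fanatic-fandom", split="final")
+
+ print(ds[0]["text"][:200])  # page text, converted to Markdown
+ print(ds[0]["meta"])        # {"title": ..., "domain": ..., "cats": ..., "removed": [...]}
+ ```
+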
97
+ ### Data Keys
98
+
99
+ For this dataset, we have included the intermediate outputs from most of the processing steps. They are listed below (a short reading sketch follows the list):
100
+
101
+ - `fandom_wikis_210224.csv`
102
+ - A CSV file containing the list of wikis found when scraping `Special:NewWikis` on 21/02/2024.
+ - The columns are as follows: `Sub Domain,Name of Wiki,Path name,0`
104
+ - The stray zero can be ignored as it does not serve any purpose.
105
+ - `fandom_wikis_pages_210224_v2.jsonl`
106
+ - Contains a jsonl list of wiki pages for each wiki.
107
+ - Each jsonl has the following keys:
108
+ - domain: str [The subdomain.]
109
+ - path: str [Path to `api.php`. Which can be different for different languages]
110
+ - pages: list[str] [A list of strings containing page names]
111
+ - `v2.5-chunks` [folder]
112
+ - Contains all the pages fetched from the list in `fandom_wikis_pages_210224_v2.jsonl`
113
+ - The original file it was from is `fandom_wikis_pages_contents_210224_v2.jsonl`, which is 283.44GB in size and can't be uploaded to HF.
114
+ - Each jsonl has the following keys:
115
+ - domain: str [The subdomain.]
116
+ - path: str [Path to `api.php`. Which can be different for different languages]
117
+ - pages: str [Page name]
118
+ - content: raw response from api.php
119
+ - `v2.5-chunks-roblox-filter` [folder]
120
+ - Contains the chunked files after Roblox wikis have been filtered out.
121
+ - Each jsonl has the following keys:
122
+ - domain: str [The subdomain.]
123
+ - path: str [Path to `api.php`. Which can be different for different languages]
124
+ - pages: str [Page name]
125
+ - content: raw response from api.php
126
+ - `Fandom-v0.5.jsonl` [file]
127
+ - Jsonl file containing the fully processed text.
128
+ - Each jsonl has the following keys:
129
+ - text: str [The text content.]
130
+ - meta: dict[str,str] [dictionary of metadata]
131
+ - title: str [The page/name]
132
+ - domain: str [The subdomain.]
133
+ - cats: str [Categories. Extracted and unused.]
134
+ - removed: list[str] [A list of removed stubs / html content]
135
+
136
+ - `roblox.domains.txt` [Extras]
137
+ - A txt list of Roblox domains.
138
+
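+ For reference, a minimal sketch of streaming the final file directly with `orjson`, assuming `data/Fandom-v0.5.jsonl` has been downloaded locally:
+
+ ```python
+ import orjson
+
+ with open("data/Fandom-v0.5.jsonl", "rb") as f:
+     for line in f:
+         record = orjson.loads(line)
+         meta = record["meta"]  # title, domain, cats, removed
+         print(meta["domain"], meta["title"], len(record["text"]))
+         break  # drop this break to stream the full ~25GB file
+ ```
+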
139
+ ### Dataset Curators
140
+
141
+ KaraKaraWitch. (I typically hang out in the PygmalionAI Discord, sometimes EleutherAI's. If something is wrong, ping `@karakarawitch` on Discord.)
142
+
143
+ I'd be happy if you could spread the word and recommend this dataset for your use cases `:)`
144
+
145
+ ### Licensing Information
146
+
147
+ Most Fandom user-created content is licensed under CC-BY-SA unless otherwise noted. Under that assumption, we did not include any figures or images, as they are typically not licensed under CC-BY-SA.
148
+
149
+ ### Citation Information
150
+
151
+ ```
152
+ @ONLINE{fanaticfandom,
+ title = {FanaticFandom},
+ author = {KaraKaraWitch and recursal.ai},
155
+ year = {2023},
156
+ howpublished = {\url{TBD}},
157
+ }
158
+ ```
159
+
160
+ ### Special Thanks
161
+
162
+ - [undeleted](https://huggingface.co/undeleted) from RyokoAI for providing initial scripts to base stuff on.
163
+ I eventually decided to write my own scraper while taking inspiration from their code.
164
+ - [Google / Gemini]... I'm horrible at writing professional stuff lol.
Scripts/HTML2Markdown.py ADDED
@@ -0,0 +1,451 @@
1
+ import multiprocessing
2
+ import pathlib
3
+ import re
4
+ import string
5
+ import traceback
6
+ from multiprocessing import Queue
7
+
8
+ import markdownify
9
+ import orjson
10
+ import tqdm
11
+ from bs4 import BeautifulSoup, Comment, NavigableString, Tag
12
+ from markdownify import chomp
13
+
14
+ if __name__ == "__main__":
15
+ multiprocessing.set_start_method("forkserver")
16
+
17
+
18
+ queue = Queue(maxsize=64)
19
+
20
+
21
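+ # Markdown converter tweaks: keep link text but drop the URL target, drop images
+ # entirely, and render ordered/unordered list items with the correct numbering/bullets.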
+ class WikiConverter(markdownify.MarkdownConverter):
22
+ def convert_a(self, el, text, convert_as_inline):
23
+ prefix, suffix, text = chomp(text)
24
+ if not text:
25
+ return ""
26
+ return "%s%s%s" % (prefix, text, suffix)
27
+
28
+ integer_rgx = re.compile("^[0-9]*$")
29
+
30
+ @staticmethod
31
+ def is_intable(string: str):
32
+ if not string or not string.isdigit():
33
+ return False
34
+ if WikiConverter.integer_rgx.match(string):
35
+ return True
36
+
37
+ def convert_img(self, el, text, convert_as_inline):
38
+ convert_as_inline = True
39
+ if (
40
+ convert_as_inline
41
+ and el.parent.name not in self.options["keep_inline_images_in"]
42
+ ):
43
+ return ""
44
+ return ""
45
+
46
+ def convert_li(self, el, text, convert_as_inline):
47
+ parent = el.parent
48
+ if parent is not None and parent.name == "ol":
49
+ start = parent.get("start")
50
+ if start and WikiConverter.is_intable(start.strip()):
51
+ start = int(start.strip())
52
+ else:
53
+ start = 1
54
+ bullet = "%s." % (start + parent.index(el))
55
+ else:
56
+ depth = -1
57
+ while el:
58
+ if el.name == "ul":
59
+ depth += 1
60
+ el = el.parent
61
+ bullets = self.options["bullets"]
62
+ bullet = bullets[depth % len(bullets)]
63
+ return "%s %s\n" % (bullet, (text or "").strip())
64
+
65
+
66
+ wk = WikiConverter()
67
+
68
+
69
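+ # Drop tables that are mostly markup: more <td> cells than visible characters
+ # (with very little text overall), or an excessive number of cells.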
+ def table_filtration(input_soup: BeautifulSoup, title):
70
+ for table in input_soup.select("table"):
71
+ tds = len(table.find_all("td"))
72
+ texsize = len(table.get_text().replace(" ", ""))
73
+ if tds >= texsize and texsize < 50:
74
+ table.decompose()
75
+ if tds > 20:
76
+ # print("Decompose Excessive td")
77
+ table.decompose()
78
+ # print(table.get_text().replace(" ", ""))
79
+ # print("Removing table from", title, ". TD exceeds Content")
80
+ return input_soup
81
+
82
+
83
+ def soup_data(data: BeautifulSoup, title: str):
84
+ soup = data
85
+
86
+ # Navigation elements
87
+ [toc.decompose() for toc in soup.find_all("div", attrs={"class": "toc"})]
88
+ [toc.decompose() for toc in soup.select("[class*='nav']")]
89
+ # [toc.decompose() for toc in soup.select(".navbox")]
90
+ # Edit Section
91
+ [element.decompose() for element in soup.select(".mw-editsection")]
92
+ # Remove styles
93
+ [element.decompose() for element in soup.select("style")]
94
+
95
+ [element.decompose() for element in soup.select("sup.reference")]
96
+ # star-citizen.wiki.
97
+ [element.decompose() for element in soup.select(".thumbcaption")]
98
+ # Images and pictures are not under licenses typically.
99
+ [toc.decompose() for toc in soup.select("audio")]
100
+ [toc.decompose() for toc in soup.select("picture")]
101
+ [toc.decompose() for toc in soup.select("img")]
102
+ [toc.decompose() for toc in soup.select("[class*='video']")]
103
+ # Blazblue.wiki
104
+ [toc.decompose() for toc in soup.select("[class*='tooltip']")]
105
+ [toc.decompose() for toc in soup.select("video")]
106
+
107
+ [
108
+ toc.decompose()
109
+ for toc in soup.select(".no-featured-video .featured-video-player-container")
110
+ ]
111
+ # print(soup)
112
+ cleaned_soup = table_filtration(soup, title)
113
+ composed_data = (
114
+ re.sub(r"\n\s*\n", "\n\n", wk.convert_soup(cleaned_soup))
115
+ .replace("\n |\n|", " |\n")
116
+ .strip()
117
+ )
118
+ return composed_data
119
+
120
+
121
+ raw_puncts = string.punctuation + "{}()[]【】、,゠=…‥。「」『』〝〟"
122
+
123
+ puncts = str.maketrans("", "", raw_puncts)
124
+
125
+ cpl = re.compile(r"\n\s*\n")
126
+
127
+ dbg_q = multiprocessing.Queue()
128
+
129
+
130
+ def debug_queue():
131
+ try:
132
+ with open("debug.txt", "w", encoding="utf-8") as f:
133
+ while True:
134
+ z = dbg_q.get()
135
+ if z is None or z == "None":
136
+ break
137
+ f.write(z + "\n")
138
+ f.flush()
139
+ except Exception as e:
140
+ print(e)
141
+
142
+
143
+ # Filters based on classes
144
+ msgbox = set(["mbox", "notice", "hidden", "plainlinks"])
145
+ msgbox2 = set(["mbox", "notice", "stub-box", "plainlinks"])
146
+ msgbox3 = set(["notice", "metadata", "plainlinks"])
147
+ # Aggressive class filter.
148
+ msgbox_agressive = set(
149
+ [
150
+ "mbox-w",
151
+ "mbox",
152
+ "msgbox",
153
+ "notice-container",
154
+ "notice",
155
+ "message-box",
156
+ "boilerplate",
157
+ "ambox",
158
+ "ombox",
159
+ ]
160
+ )
161
+
162
+ wikistub = set(["wikistub"])
163
+
164
+
165
+ def get_text_cleaned(elem):
166
+ return (
167
+ cpl.sub("\n\n", elem.get_text(" "))
168
+ .replace("\n", " ")
169
+ .replace(" ", " ")
170
+ .replace(" ", " ")
171
+ .lower()
172
+ )
173
+
174
+
175
+ def get_plain_text_clean(data: str):
176
+ return (
177
+ cpl.sub("\n\n", data.translate(puncts))
178
+ .replace("\n", " ")
179
+ .replace(" ", " ")
180
+ .replace(" ", " ")
181
+ .lower()
182
+ )
183
+
184
+
185
+ t_stubs = 0
186
+ f_stubs = 0
187
+
188
+ t_lock = multiprocessing.Lock()
189
+
190
+
191
+ def t_inc(idx: int):
192
+ global t_stubs
193
+ with t_lock:
194
+ t_stubs += idx
195
+
196
+
197
+ f_lock = multiprocessing.Lock()
198
+
199
+
200
+ def f_inc(idx: int):
201
+ global f_stubs
202
+ with f_lock:
203
+ f_stubs += idx
204
+
205
+
206
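+ # Heuristically remove "this article is a stub" notices from the parser output,
+ # matching known message-box classes first and riskier tag-based rules afterwards.
+ # Returns the cleaned soup plus a list describing what was pruned.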
+ def stub_removal(soup: str, debug=None):
207
+ b_soup = BeautifulSoup(soup, "lxml")
208
+ a = get_text_cleaned(b_soup)
209
+ if "this article is a stub" in a:
210
+ parser = b_soup.select_one(".mw-parser-output")
211
+ if parser is None:
212
+ return b_soup, []
213
+ pruned = []
214
+ for child in parser.children:
215
+ if child is None:
216
+ continue
217
+ if isinstance(child, Comment):
218
+ continue
219
+ if isinstance(child, NavigableString):
220
+ # print("Nav string?")
221
+ # print(child)
222
+ # print("===========")
223
+ continue
224
+ if not isinstance(child, Tag):
225
+ # print(type(child))
226
+ continue
227
+ classes = set(i.lower() for i in child.get("class", []))
228
+ styles = child.get("style", "")
229
+ has_border = False
230
+ if styles:
231
+ styles = {
232
+ i.split(":")[0]: ":".join(i.split(":")[1:])
233
+ for i in child.get("style", "").split(";")
234
+ }
235
+
236
+ has_border = any(
237
+ [
238
+ styles.get("border"),
239
+ styles.get("border-width") and styles.get("border-style"),
240
+ styles.get("border-top"),
241
+ styles.get("border-bottom"),
242
+ styles.get("border-left"),
243
+ styles.get("border-right"),
244
+ styles.get("background"),
245
+ ]
246
+ )
247
+ # print(styles)
248
+
249
+ child_text = get_text_cleaned(child)
250
+ has_stub_word = "stub" in child_text and "this article" in child_text
251
+ c_name = "" if not child.name else child.name
252
+
253
+ if not has_stub_word:
254
+ continue
255
+ if (
256
+ len(classes.intersection(msgbox)) == len(msgbox)
257
+ or len(classes.intersection(msgbox2)) == len(msgbox2)
258
+ or len(classes.intersection(msgbox3)) == len(msgbox3)
259
+ ): # Seems to be safe.
260
+ child.decompose()
261
+ pruned.append("mbox")
262
+ elif len(classes.intersection(msgbox_agressive)) > 0: # Aggressive
263
+ child.decompose()
264
+ # dbg_q.put(f'[I aggressive "mbox3_aggressive" prune]: {debug}')
265
+ pruned.append("mbox3_aggressive")
266
+ elif len(classes.intersection(wikistub)) == 1: # Seems to be safe.
267
+ child.decompose()
268
+ pruned.append("wikistub[gods-games-we-play]")
269
+ elif "table" in c_name: # Bit risky, but i guess it works?
270
+ pruned.append("table[stub-word]")
271
+ child.decompose()
272
+ elif "dl" in c_name: # Seems to be safe.
273
+ if len(child.find_all("dd", recursive=False)) == 1:
274
+ pruned.append("dl > dd")
275
+ child.decompose()
276
+ elif "div" in c_name:
277
+ inner_elements = [
278
+ i for i in child.find_all(recursive=False) if isinstance(i, Tag)
279
+ ]
280
+ if len(inner_elements) == 0:
281
+ # print(child.find_all(recursive=False))
282
+ # dbg_q.put(f"[No Inner Element()?]: {len(inner_elements)}")
283
+ continue
284
+ stub_inner = get_text_cleaned(inner_elements[0])
285
+ has_stub_word = "stub" in stub_inner and "this article" in stub_inner
286
+
287
+ if len(inner_elements) == 0 or len(inner_elements) > 2:
288
+ # dbg_q.put(
289
+ # f"[W No Prune len()? has_stub_word]: {debug} {has_stub_word} {len(inner_elements)}"
290
+ # )
291
+ continue
292
+ if (
293
+ inner_elements[0].name
294
+ and inner_elements[0].name == "table"
295
+ and has_stub_word
296
+ ):
297
+ pruned.append("table[stub-word]")
298
+ child.decompose()
299
+ elif has_border and inner_elements[0].name.lower() in [
300
+ "div",
301
+ "p",
302
+ "span",
303
+ ]:
304
+ # dbg_q.put(f'[I Risky "has_border" prune]: {debug}')
305
+ pruned.append("risky[border]")
306
+ else:
307
+ # dbg_q.put(
308
+ # f"[W No Prune div? has_stub_word]: {debug} {has_stub_word} {inner_elements[0].name}"
309
+ # )
310
+ pruned.append("?")
311
+ elif "p" in c_name: # Really risky. will try logging it first.
312
+ child.decompose()
313
+ # dbg_q.put(f'[I Risky "p" prune]: {debug}')
314
+ f_inc(1)
315
+ pruned.append("risky[p]")
316
+ elif "center" in c_name: # Really risky. will try logging it first.
317
+ # dbg_q.put(f'[I Risky "center" prune]: {debug}')
318
+ child.decompose()
319
+ f_inc(1)
320
+ pruned.append("risky[center]")
321
+ if pruned:
322
+ t_inc(len(pruned))
323
+ # print("Pruned", pruned, debug)
324
+ return b_soup, pruned
325
+ else:
326
+ # dbg_q.put(f"[W No Prune?]: {debug}")
327
+ # print(f"[W No Prune?]: {debug}")
328
+ return b_soup, []
329
+
330
+ return b_soup, []
331
+
332
+
333
+ def writer(fp: str, writer_queue: multiprocessing.Queue):
334
+ pbar = tqdm.tqdm()
335
+ try:
336
+ with open(fp, "wb") as fl:
337
+ while True:
338
+ q = writer_queue.get(block=True)
339
+ if q is None or isinstance(q, str) and q == "None":
340
+ print("Q Empty, exit.")
341
+ break
342
+ elif isinstance(q, bytes):
343
+ fl.write(q)
344
+ fl.write(b"\n")
345
+ pbar.update(1)
346
+ except Exception as e:
347
+ print("Writer Crashed?")
348
+ traceback.print_exception(e)
349
+
350
+
351
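+ # Per-record worker: parse one api.php response, skip forum/comment/gallery pages,
+ # strip stub notices, convert the HTML to Markdown, and enqueue the result
+ # if enough text remains after removing punctuation.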
+ def mt_fn(bytes_data: bytes, write_queue: multiprocessing.Queue):
352
+ data = orjson.loads(bytes_data)
353
+ dbg = f"domain: {data['domain']} title: {data['page']}"
354
+ if "parse" not in data["content"]:
355
+ print(
356
+ "Missing parse content",
357
+ "domain",
358
+ data["domain"],
359
+ "title",
360
+ data["page"],
361
+ )
362
+ return
363
+ lower_title = data["page"].lower()
364
+ if (
365
+ lower_title.startswith("forum:")
366
+ or lower_title.startswith("discussion:")
367
+ or lower_title.startswith("thread:")
368
+ or lower_title.startswith("comments:")
369
+ or lower_title.startswith("comment:")
370
+ ):
371
+ return
372
+ if lower_title.endswith("gallery"):
373
+ return
374
+ soup = data["content"]["parse"]["text"]["*"]
375
+ cats = ",".join([z["*"] for z in data["content"]["parse"]["categories"]]).lower()
376
+ unstubbed_soup, removed = stub_removal(soup, debug=dbg)
377
+ text = soup_data(unstubbed_soup, data["page"])
378
+ unpunct = get_plain_text_clean(text)
379
+ if len(unpunct) > 64:
380
+ # print(text[:64])
381
+ write_queue.put(
382
+ orjson.dumps(
383
+ {
384
+ "text": text,
385
+ "meta": {
386
+ "title": data["page"],
387
+ "domain": data["domain"],
388
+ "cats": cats,
389
+ "removed": removed,
390
+ },
391
+ }
392
+ )
393
+ )
394
+
395
+
396
+ def err_handler(e):
397
+ if "KeyboardInterrupt" not in str(e):
398
+ traceback.print_exception(e)
399
+
400
+
401
+ def main():
402
+ with multiprocessing.Pool(processes=64) as pool:
403
+ managed = multiprocessing.Manager()
404
+ writer_out = managed.Queue()
405
+ write_proc = multiprocessing.Process(
406
+ target=writer, args=("test.jsonl", writer_out), daemon=False
407
+ )
408
+ # debug_tasks.start()
409
+ write_proc.start()
410
+
411
+ tasks = []
412
+ for file in pathlib.Path("v2.5-chunks-roblox-filter").iterdir():
413
+ with open(file, "rb") as f:
414
+ iter_wrapper = f # tqdm.tqdm(f)
415
+ for line in iter_wrapper:
416
+ tasks.append(
417
+ pool.apply_async(
418
+ mt_fn, args=(line, writer_out), error_callback=err_handler
419
+ )
420
+ )
421
+
422
+ if len(tasks) >= 100000:
423
+ print("Waiting for Chunked task to complete.")
424
+ for task in tasks:
425
+ if task.ready():
426
+ continue
427
+ task.wait()
428
+ tasks = []
429
+ print("[I] ========== Task gen done", file)
430
+ print("Waiting for Chunked task to complete.")
431
+ for task in tasks:
432
+ if task.ready():
433
+ continue
434
+ task.wait()
435
+ print("Cleanup")
436
+ # print("Stubs", t_stubs, "FStubs", f_stubs)
437
+ dbg_q.put("None")
438
+ # debug_tasks.join()
439
+ writer_out.put("None")
440
+ # while not writer_out.empty():
441
+ # print(f"Waiting for empty writer {writer_out.qsize()} items left")
442
+ # time.sleep(5)
443
+
444
+ write_proc.join()
445
+
446
+ pool.close()
447
+ pool.join()
448
+
449
+
450
+ if __name__ == "__main__":
451
+ main()
Scripts/IndexFandomPages.py ADDED
@@ -0,0 +1,241 @@
1
+ #!/usr/bin/env python3
2
+ # License: MIT
3
+ # Copyright (C) 2024, Shinon.
4
+ # Code inspiration from Ronsor Labs. Licensed as below.
5
+ # License: AGPL 3.0
6
+ # Copyright (C) 2023, 2024 Ronsor Labs.
7
+ import asyncio
8
+ import concurrent.futures as conc
9
+ import csv
10
+ import pathlib
11
+ import random
12
+ import urllib.parse
13
+
14
+ import aiofile
15
+ import httpx
16
+ import orjson
17
+ import tqdm
18
+ from bs4 import BeautifulSoup
19
+
20
+ from proxy_magic_session import get_async_session
21
+
22
+ CONCURRENT_WORKERS = 128
23
+
24
+ executor = conc.ProcessPoolExecutor(max_workers=64)
25
+
26
+ queue = asyncio.Queue(maxsize=1048576)
27
+
28
+ WIKI_LIST = pathlib.Path("fandom_wikis_210224.csv")
29
+ OUPUT_JSONL = pathlib.Path("fandom_wikis_pages_210224_v2.jsonl")
30
+
31
+
32
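+ # Extract page titles from a Special:AllPages listing and locate the
+ # "next page" link, if there is one.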
+ def parse_special_index(html_text: str):
33
+ doc = BeautifulSoup(html_text, "lxml")
34
+
35
+ page_list_li = doc.select("ul.mw-allpages-chunk li a")
36
+ page_list = set()
37
+ for page_item in page_list_li:
38
+ page_title = page_item.get("title", "")
39
+ if not page_title or page_title is None:
40
+ print("[W] no page title?")
41
+ continue
42
+ if page_title.lower().rstrip().endswith("(redirect)"):
43
+ continue
44
+ else:
45
+ page_list.add(page_item["title"])
46
+ page_list = list(page_list)
47
+
48
+ next_url = doc.select(".mw-allpages-nav a")
49
+ if next_url is None or len(next_url) == 0:
50
+ next_url = None
51
+ else:
52
+ candidates = next_url
53
+ next_url = None
54
+ for x in candidates:
55
+ if "next page" in x.text.lower():
56
+ if "index.php" not in x["href"].lower():
57
+ next_url = x["href"].split("=", 1)
58
+ next_url[1] = urllib.parse.quote_plus(
59
+ next_url[1].replace("+", "__SAFE_PLUS__")
60
+ ).replace("__SAFE_PLUS__", "+")
61
+ next_url = "=".join(next_url)
62
+ else:
63
+ next_url = x["href"]
64
+
65
+ return page_list, next_url
66
+
67
+
68
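+ # Walk a wiki's Special:AllPages listing, following "next page" links,
+ # and return every page title found.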
+ async def domain_procesor(domain: str, path: str):
69
+ session: httpx.AsyncClient = get_async_session()
70
+ loop = asyncio.get_running_loop()
71
+ session.cookies.clear()
72
+ session.headers["user-agent"] = (
73
+ "Mozilla/6.2 (compatible; Microsoft Chrome 137.0; Apple Gecko 47.0 in AOL Firefox 37.6) Google Toolbar/1.3"
74
+ )
75
+ print(f"[I] Processing: https://{domain}{path}Special:AllPages")
76
+ tries = 10
77
+ # pbar = tqdm.tqdm(desc=f"{domain}")
78
+ data = None
79
+ while True:
80
+ try:
81
+ data = await session.get(
82
+ f"https://{domain}{path}Special:AllPages", follow_redirects=True
83
+ )
84
+ if data.status_code != 200:
85
+ if data.status_code == 410:
86
+ break
87
+
88
+ # print(f"https://{domain}{path}Special:AllPages", data.status_code)
89
+ continue
90
+ break
91
+ except httpx.TransportError as e:
92
+ await session.aclose()
93
+ session: httpx.AsyncClient = get_async_session()
94
+ print(f"[W] Retry TransportError https://{domain}{path} {e}")
95
+ await asyncio.sleep(1)
96
+ tries -= 1
97
+ except httpx.HTTPError as e:
98
+ print(f"[W] Uncaught Exception Retry... https://{domain}{path} | {e}")
99
+ await session.aclose()
100
+ session: httpx.AsyncClient = get_async_session()
101
+ # print(f"[W] Retry TransportError https://{domain}{path} {e}")
102
+ await asyncio.sleep(1)
103
+ tries -= 1
104
+ except Exception as e:
105
+ print(f"[W] Uncaught Exception https://{domain}{path} | {e}")
106
+ break
107
+ if tries <= 0:
108
+ print(f"[W] Tries Exceeded https://{domain}{path}")
109
+ break
110
+
111
+ if tries <= 0 or data is None:
112
+ return
113
+ if data.status_code == 410:
114
+ return
115
+
116
+ # Handle redirected domains
117
+ domain = data.url.host
118
+
119
+ page_list, next_url = await loop.run_in_executor(
120
+ executor, parse_special_index, data.text
121
+ )
122
+ # pbar.update(len(page_list))
123
+ while next_url:
124
+ tries = 10
125
+ data = None
126
+ while True:
127
+ try:
128
+ data = await session.get(
129
+ f"https://{domain}{next_url}", follow_redirects=True
130
+ )
131
+ if data.status_code != 200:
132
+ if data.status_code == 410:
133
+ break
134
+ print(f"https://{domain}{next_url}", data.status_code)
135
+ continue
136
+ break
137
+ except httpx.TransportError as e:
138
+ await session.aclose()
139
+ session: httpx.AsyncClient = get_async_session()
140
+ print(f"[W2] Retry TransportError https://{domain}{next_url} {e}")
141
+ await asyncio.sleep(1)
142
+ tries -= 1
143
+ except httpx.HTTPError as e:
144
+ print(
145
+ f"[W2] Uncaught Exception Retry... https://{domain}{next_url} | {e}"
146
+ )
147
+ await session.aclose()
148
+ session: httpx.AsyncClient = get_async_session()
149
+ # print(f"[W] Retry TransportError https://{domain}{path} {e}")
150
+ await asyncio.sleep(1)
151
+ tries -= 1
152
+ except Exception as e:
153
+ print(f"[W2] Uncaught Exception https://{domain}{next_url} | {e}")
154
+ break
155
+ if tries <= 0:
156
+ print(f"[W2] Tries Exceeded https://{domain}{next_url}")
157
+ break
158
+ if tries <= 0 or data is None:
159
+ return
160
+ if data.status_code == 410:
161
+ return
162
+ new_page_list, next_url = await loop.run_in_executor(
163
+ executor, parse_special_index, data.text
164
+ )
165
+ # pbar.update(len(new_page_list))
166
+ page_list.extend(new_page_list)
167
+ # pbar.close()
168
+ print(f"[I] Done: {domain} | {len(page_list)}")
169
+ await session.aclose()
170
+ return page_list
171
+
172
+
173
+ export_queue = asyncio.Queue(CONCURRENT_WORKERS + 1)
174
+
175
+
176
+ async def compiler_worker():
177
+ loop = asyncio.get_running_loop()
178
+ async with aiofile.async_open(OUPUT_JSONL, "ab") as f:
179
+ while True:
180
+ page_data = await export_queue.get()
181
+ if page_data is None:
182
+ break
183
+ domain, pages, path = page_data
184
+ print(f"[I] Dump: {domain}")
185
+ fi = {"domain": domain, "path": path, "pages": pages}
186
+ bytes_data = await loop.run_in_executor(executor, orjson.dumps, fi)
187
+ await f.write(bytes_data)
188
+ await f.write(b"\n")
189
+ print(f"[I] OKDump: {domain}")
190
+
191
+
192
+ async def worker():
193
+ await asyncio.sleep(random.uniform(1, CONCURRENT_WORKERS / 60))
194
+ while True:
195
+ domain_root = await queue.get()
196
+ if domain_root is None:
197
+ break
198
+ domain, _, path, __ = domain_root
199
+ if path == "":
200
+ path = "/wiki/"
201
+ if "vpn-restricted" in domain:
202
+ continue
203
+ pages = await domain_procesor(domain, path)
204
+ if pages is None:
205
+ continue
206
+ await export_queue.put((domain, pages, path))
207
+
208
+
209
+ async def main():
210
+ loop = asyncio.get_running_loop()
211
+ workers = [loop.create_task(worker()) for _ in range(CONCURRENT_WORKERS)]
212
+ writer = loop.create_task(compiler_worker())
213
+
214
+ seen_domains = set()
215
+ if OUPUT_JSONL.exists():
216
+ print("[I] Fetching seen domains...")
217
+ with open(OUPUT_JSONL, "rb") as f:
218
+ for line in tqdm.tqdm(f, desc="Domains Parsed"):
219
+ seen_domains.add(orjson.loads(line)["domain"])
220
+
221
+ with open(WIKI_LIST) as f:
222
+ reader = csv.reader(f)
223
+ for line in reader:
224
+ if len(line) == 0:
225
+ continue
226
+ domain, friendly_name, path, has_scraped = line
227
+ if domain[0] == "#":
228
+ continue
229
+ if domain in seen_domains:
230
+ continue
231
+ await queue.put((domain, friendly_name, path, has_scraped))
232
+ for _ in range(CONCURRENT_WORKERS + 1):
233
+ await queue.put(None)
234
+
235
+ await asyncio.gather(*workers)
236
+ await export_queue.put(None)
237
+ await asyncio.gather(writer)
238
+
239
+
240
+ if __name__ == "__main__":
241
+ asyncio.run(main())
Scripts/RobloxWikiFilter.py ADDED
@@ -0,0 +1,111 @@
1
+ # Purging roblox domain games base of word matching
2
+ # - Shinon
3
+ import multiprocessing
4
+ import re
5
+ from bs4 import BeautifulSoup
6
+ import orjson
7
+ import pathlib
8
+ import typer
9
+ from sqlitedict import SqliteDict
10
+
11
+ cpl = re.compile(r"(roblox | roblox)", flags=re.IGNORECASE)
12
+
13
+ app = typer.Typer()
14
+
15
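+ # Copy a chunk file, dropping every record whose domain appears in the Roblox domain list.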
+ def filter_worker(
16
+ chunked_jsonl: pathlib.Path, out_file: pathlib.Path, index_file: pathlib.Path
17
+ ):
18
+ domains = set([i.strip() for i in index_file.read_text().split("\n") if i.strip()])
19
+ with open(chunked_jsonl, "rb") as f, open(out_file, "wb") as out_fp:
20
+ for line in f:
21
+ data = orjson.loads(line)
22
+ if data["domain"] in domains:
23
+ continue
24
+ out_fp.write(line)
25
+
26
+
27
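+ # Count "roblox" word matches per domain and write domains with two or more
+ # matches to a text file for later filtering.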
+ def roblox_worker(file: pathlib.Path, index_file: pathlib.Path, txt_file: pathlib.Path):
28
+ with SqliteDict(index_file) as fout:
29
+ with open(file, "rb") as f:
30
+ sync_time = 0
31
+ for line in f:
32
+ data = orjson.loads(line)
33
+ # dbg = f"domain: {data['domain']} title: {data['page']}"
34
+ if "parse" not in data["content"]:
35
+ print(
36
+ "Missing parse content",
37
+ "domain",
38
+ data["domain"],
39
+ "title",
40
+ data["page"],
41
+ )
42
+ continue
43
+ soup = data["content"]["parse"]["text"]["*"]
44
+ composed_soup = (
45
+ re.sub(r"\n\s*\n", "\n\n", BeautifulSoup(soup, "lxml").get_text())
46
+ .replace("\n", "")
47
+ .strip()
48
+ )
49
+ robloxed = len(cpl.findall(composed_soup))
50
+ if data["domain"] not in fout:
51
+ fout[data["domain"]] = 0
52
+ fout[data["domain"]] += robloxed
53
+ sync_time += 1
54
+ if sync_time % 10000 == 0 and sync_time != 0:
55
+ fout.commit()
56
+ fout.commit()
57
+ with open(txt_file, "wb") as f:
58
+ for domain, roblox_count in fout.iteritems():
59
+ if roblox_count >= 2:
60
+ print(domain)
61
+ f.write(domain.encode() + b"\n")
62
+
63
+
64
+ def err_cb(e):
65
+ print(e)
66
+
67
+ @app.command()
68
+ def index(folder:pathlib.Path,index_folder:pathlib.Path):
69
+ with multiprocessing.Pool(processes=64) as pool:
70
+ fn = []
71
+ for file in pathlib.Path(folder).iterdir():
72
+ fn.append(
73
+ pool.apply_async(
74
+ roblox_worker,
75
+ args=(
76
+ pathlib.Path("v2.5-chunks") / f"{file.name}",
77
+ index_folder / f"{file.stem}.sqlite",
78
+ index_folder / f"{file.name}.domains.txt",
79
+ ),
80
+ error_callback=err_cb,
81
+ )
82
+ )
83
+ for task in fn:
84
+ task.wait()
85
+ pool.close()
86
+ pool.join()
87
+
88
+ @app.command()
89
+ def main(folder:pathlib.Path,output_folder:pathlib.Path,domains_txt:pathlib.Path):
90
+ with multiprocessing.Pool(processes=64) as pool:
91
+ fn = []
92
+ for file in folder.iterdir():
93
+ fn.append(
94
+ pool.apply_async(
95
+ filter_worker,
96
+ args=(
97
+ file,
98
+ output_folder / f"{file.name}.jsonl",
99
+ domains_txt
100
+ ),
101
+ error_callback=err_cb,
102
+ )
103
+ )
104
+ for task in fn:
105
+ task.wait()
106
+ pool.close()
107
+ pool.join()
108
+
109
+ app()
110
+
111
+ # main()
Scripts/WikiPageFetcher.py ADDED
@@ -0,0 +1,157 @@
1
+ #!/usr/bin/env python3
2
+ # License: MIT
3
+ # Copyright (C) 2024, Shinon.
4
+ # Code inspiration from Ronsor Labs. Licensed as below.
5
+ # License: AGPL 3.0
6
+ # Copyright (C) 2023, 2024 Ronsor Labs.
7
+
8
+ # Fetches pages and page content from a page list
9
+
10
+ import asyncio
11
+ import concurrent.futures as conc
12
+ import pathlib
13
+ import urllib.parse
14
+
15
+ import aiofile
16
+ import httpx
17
+ import orjson
18
+
19
+ from proxy_magic_session import get_async_session
20
+
21
+ CONCURRENT_WORKERS = 128
22
+
23
+ executor = conc.ProcessPoolExecutor(max_workers=64)
24
+
25
+ pages_queue = asyncio.Queue(maxsize=1048576)
26
+ output_queue = asyncio.Queue(maxsize=int(CONCURRENT_WORKERS*1.5))
27
+
28
+ INPUT_JSONL = pathlib.Path("fandom_wikis_pages_210224_v2.jsonl")
29
+ OUPUT_JSONL = pathlib.Path("fandom_wikis_pages_contents_210224_v2.jsonl")
30
+
31
+
32
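+ # POST a URL with retries: rotate the proxied session on transport errors and
+ # only accept responses whose body parses as JSON.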
+ async def retry_url(url: str):
33
+ loop = asyncio.get_running_loop()
34
+ session: httpx.AsyncClient = get_async_session()
35
+ session.cookies.clear()
36
+ session.headers[
37
+ "user-agent"
38
+ ] = "Mozilla/6.2 (compatible; Microsoft Chrome 137.0; Apple Gecko 47.0 in AOL Firefox 37.6) Google Toolbar/1.3"
39
+ tries = 10
40
+ data = None
41
+ while True:
42
+ try:
43
+ data = await session.post(url, follow_redirects=True)
44
+ if data.status_code >= 300 and data.status_code < 500 and data.status_code != 403:
45
+ if data.status_code == 410:
46
+ break
47
+ print(f"[W] RetryRequest | {url} {data.status_code}")
48
+ continue
49
+ try:
50
+ await loop.run_in_executor(executor, orjson.loads, data.content)
51
+ except Exception:
52
+ continue
53
+ break
54
+ except httpx.TransportError as e:
55
+ await session.aclose()
56
+ session: httpx.AsyncClient = get_async_session()
57
+ print(f"[W] Retry TransportError {url} {e}")
58
+ await asyncio.sleep(1)
59
+ tries -= 1
60
+ except httpx.HTTPError as e:
61
+ print(f"[W] Uncaught Exception Retry... {url} | {e}")
62
+ await session.aclose()
63
+ session: httpx.AsyncClient = get_async_session()
64
+ await asyncio.sleep(1)
65
+ tries -= 1
66
+ except Exception as e:
67
+ print(f"[W] Uncaught Exception {url} | {e}")
68
+ break
69
+ if tries <= 0:
70
+ print(f"[W] Tries Exceeded {url}")
71
+ break
72
+ await session.aclose()
73
+ if tries <= 0:
74
+ return
75
+ return data
76
+
77
+
78
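+ # Worker: pull (domain, path, page) jobs from the queue, fetch the parsed page via
+ # api.php (action=parse) and push the decoded JSON response onto the output queue.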
+ async def HTMLWorker():
79
+ loop = asyncio.get_running_loop()
80
+ while True:
81
+ data = await pages_queue.get()
82
+ if data is None:
83
+ break
84
+ domain, path, page = data
85
+ query_params = {
86
+ "action": "parse",
87
+ "format": "json",
88
+ "page": page,
89
+ "prop": "text|langlinks|categories|links|templates|images|externallinks|sections|revid|displaytitle|iwlinks|properties|parsewarnings|wikitext",
90
+ }
91
+ print(f"[I] HTMLW | {domain} {page} query.")
92
+ response = await retry_url(
93
+ f"https://{domain}{path}api.php?{urllib.parse.urlencode(query_params)}"
94
+ )
95
+ if response and response.status_code == 200:
96
+ print(f"[I] HTMLW | {domain} {page} dumped.")
97
+ await output_queue.put(
98
+ {
99
+ "domain": domain,
100
+ "path": path,
101
+ "page": page,
102
+ "content": await loop.run_in_executor(
103
+ executor, orjson.loads, response.content
104
+ ),
105
+ }
106
+ )
107
+
108
+
109
+ async def jsonl_writer():
110
+ loop = asyncio.get_running_loop()
111
+ async with aiofile.async_open(OUPUT_JSONL, "wb") as f:
112
+ while True:
113
+ dict_data: dict = await output_queue.get()
114
+ if dict_data is None:
115
+ break
116
+ print(f"[I] Dump: {dict_data['domain']}{dict_data['path']}{dict_data['page']}")
117
+ bytes_data = await loop.run_in_executor(executor,orjson.dumps, dict_data)
118
+ await f.write(bytes_data)
119
+ await f.write(b"\n")
120
+
121
+
122
+ async def main():
123
+ loop = asyncio.get_running_loop()
124
+ workers = [loop.create_task(HTMLWorker()) for _ in range(CONCURRENT_WORKERS)]
125
+ writer = loop.create_task(jsonl_writer())
126
+ with open(INPUT_JSONL, "rb") as f:
127
+ line = f.readline()
128
+ for line in f:
129
+ if line:
130
+ domain_data = orjson.loads(line)
131
+ page_count = len(domain_data["pages"])
132
+ if page_count <= 5:
133
+ print(f"[I] Skip {domain_data['domain']} due to low page count.")
134
+ continue
135
+ for page in domain_data["pages"]:
136
+ await pages_queue.put(
137
+ (domain_data["domain"], domain_data["path"][:-5], page)
138
+ )
139
+ for _ in range(CONCURRENT_WORKERS):
140
+ await pages_queue.put(None)
141
+ while True:
142
+ done_workers = 0
143
+ for worker in workers:
144
+ if worker.done():
145
+ done_workers += 1
146
+ if done_workers != CONCURRENT_WORKERS:
147
+ print(f"\r{done_workers} / {CONCURRENT_WORKERS} are completed.")
148
+ await asyncio.sleep(60)
149
+ else:
150
+ break
151
+ # await asyncio.gather(*workers)
152
+ await output_queue.put(None)
153
+ print("Sent shutdown to Jsonl writer.")
154
+ await asyncio.gather(writer)
155
+
156
+ if __name__ == "__main__":
157
+ asyncio.run(main())
Scripts/WikisIndexer.py ADDED
@@ -0,0 +1,70 @@
1
+ #!/usr/bin/env python3
2
+ # License: MIT
3
+ # Copyright (C) 2024, Shinon.
4
+
5
+ # Retrieves a full list of wikis to be scraped
6
+
7
+ import asyncio
8
+ import csv
9
+ import urllib.parse
10
+
11
+ from bs4 import BeautifulSoup
12
+
13
+ from proxy_magic_session import get_async_session
14
+
15
+
16
+ async def main():
17
+ with open("fandom_wikis_210224.csv", "w", newline="") as f:
18
+ writer = csv.writer(f)
19
+ session = get_async_session()
20
+ root = "https://community.fandom.com"
21
+ r = await session.get(f"{root}/wiki/Special:NewWikis?limit=500")
22
+ if r.status_code == 200:
23
+ nrow = 0
24
+ soup = BeautifulSoup(r.text, "lxml")
25
+ for doms in soup.select(".mw-spcontent li > a"):
26
+ href: str = doms.get("href", "")
27
+ if href:
28
+ parsed = urllib.parse.urlparse(href)
29
+
30
+ domain = parsed.netloc
31
+ if parsed.path.strip("/"):
32
+ sp = f"{parsed.path}wiki/"
33
+ else:
34
+ sp = "/wiki/"
35
+ print("Add wiki:", domain, "|", doms.get_text().strip())
36
+ writer.writerow([domain, doms.get_text().strip(), sp, 0])
37
+ nrow += 1
38
+ next_page = soup.find("a", attrs={"rel": "next", "class": "mw-nextlink"})
39
+ if next_page:
40
+ next_page_url = f'{root}{next_page.get("href")}'
41
+ else:
42
+ next_page_url = None
43
+ while next_page_url:
44
+ nrow = 0
45
+ r = await session.get(next_page_url)
46
+ soup = BeautifulSoup(r.text, "lxml")
47
+ for doms in soup.select(".mw-spcontent li > a"):
48
+ href: str = doms.get("href", "")
49
+ if href:
50
+ parsed = urllib.parse.urlparse(href)
51
+
52
+ domain = parsed.netloc
53
+ if parsed.path.strip("/"):
54
+ sp = f"{parsed.path}wiki/"
55
+ else:
56
+ sp = "/wiki/"
57
+ print("Add wiki:", domain, "|", doms.get_text().strip())
58
+ writer.writerow([domain, doms.get_text().strip(), sp, 0])
59
+ nrow += 1
60
+ next_page = soup.find(
61
+ "a", attrs={"rel": "next", "class": "mw-nextlink"}
62
+ )
63
+ if next_page:
64
+ next_page_url = f'{root}{next_page.get("href")}'
65
+ else:
66
+ next_page_url = None
67
+ print(next_page_url)
68
+
69
+
70
+ asyncio.run(main())
data/Fandom-v0.5.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0c66575d06311ea94497ff4f62651c7b99e918ec27bbb166d49f38590591837
3
+ size 25083924695
v2.5-chunks-roblox-filter/fandom-aa.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f59bb9d312f6f7c649a6db84205b8ca104bdd1fd06868f7d2c1535caa6e0c36
3
+ size 3076260389
v2.5-chunks-roblox-filter/fandom-ab.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9e3c4c10cb4e3df72d248e7c6362f76a401894d03c41a980f55ccde2e050ebf
3
+ size 2988982948
v2.5-chunks-roblox-filter/fandom-ac.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d38deff269ca23cc85011b860b845505ada0ce20af7feaf197ed4772a7edee1c
3
+ size 3253909135
v2.5-chunks-roblox-filter/fandom-ad.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5283fd799c4f160444983fc6f38ca570cac3107392530ca75e784b88aa5e8036
3
+ size 3202289490
v2.5-chunks-roblox-filter/fandom-ae.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d839a51220ec84d10febb7b33f2b8776a54a5add2cecce53d1a5159da9d0dffa
3
+ size 3175983811
v2.5-chunks-roblox-filter/fandom-af.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ab2cbc57c60a696e0430351df70f82d61121b1ccb2f09cd3ad2720dc85c7648
3
+ size 3361323936
v2.5-chunks-roblox-filter/fandom-ag.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5c5ec05324dee4c3695e65956ad24f931c12fe24f1ab26d77c84151d101f73d
3
+ size 3216472903
v2.5-chunks-roblox-filter/fandom-ah.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae213c10090278755cfead7debdfcc1b6f2c056b6fe90597cb75626679ea6468
3
+ size 3532652692
v2.5-chunks-roblox-filter/fandom-ai.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd8922e5c041f990fe91ee32193dfb6ab7c092684dc102201be151e51e32d33c
3
+ size 3665469694
v2.5-chunks-roblox-filter/fandom-aj.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d6e3e70c99d71be460fe3eab294e1bbcd988c0269bfd88d538ace89a75f9e9e
3
+ size 3573593240
v2.5-chunks-roblox-filter/fandom-ak.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75de4684a965dd4e6dbdf4892ed1414320e4e5fa12cf4cf3686c1c15c3930368
3
+ size 4066781175
v2.5-chunks-roblox-filter/fandom-al.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6690a5b3b620e92b48fd1269666a71e0b9606f3305c65a20e0768e3076d750e9
3
+ size 3119261297
v2.5-chunks-roblox-filter/fandom-am.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c52ed6fb0e18cdbc01d32ca6891dba67dd4c912dd8f58265e4e1dde156bffea2
3
+ size 3646904767
v2.5-chunks-roblox-filter/fandom-an.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8413fd13326b024486f594db083001c65bbecb65ca5661543ca320cf10446548
3
+ size 4214395319
v2.5-chunks-roblox-filter/fandom-ao.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe8ae43585028bf611d697011b43ca1e7c2812d1144f51d93cbdd91eca06acc9
3
+ size 3875038386
v2.5-chunks-roblox-filter/fandom-ap.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1db23c0ec3c9c2b251013e7ba331f0cdead73658b2c28a69e2e346001700cdf0
3
+ size 4000054995
v2.5-chunks-roblox-filter/fandom-aq.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:432556b415988f770f9957e14f0097c8642aeb53485419dc1c8fb5c65e43b223
3
+ size 3911995237
v2.5-chunks-roblox-filter/fandom-ar.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ef73dbeaf46796298e386b4dd5c0a08a4af09345b55bc552ba6dc130dba7789
3
+ size 4495367225
v2.5-chunks-roblox-filter/fandom-as.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad67103c167002ff3a862a0038ff3be7f1133d80c68077ed467192ea5a483fdc
3
+ size 3381623126
v2.5-chunks-roblox-filter/fandom-at.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f6b2abd7685f71a4f33c930e6e86300d4bcaedb4183e896b5b118db7980f0ea
3
+ size 4068383946
v2.5-chunks-roblox-filter/fandom-au.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dab5a1747bddc6feb3c40b8969681788866e8843220c541a7819367d36acf31f
3
+ size 3591797240
v2.5-chunks-roblox-filter/fandom-av.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a044338f26fcba9f380a2a19855882aac3be152b859aafdc5046c8c3559a04e
3
+ size 3471398559
v2.5-chunks-roblox-filter/fandom-aw.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ca28c522d354023470e393339a050d77c92422bb427baef9a1371d795b1988b
3
+ size 3738377264
v2.5-chunks-roblox-filter/fandom-ax.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4eeb7d77d6d91e5490448e5ff9bfa5e48eb71cbbfa140f8f29b71f41b860d667
3
+ size 3892347988
v2.5-chunks-roblox-filter/fandom-ay.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3047dd355353ce1d8c0963c7059ad966cb946a84f5c4eeffb5b9f14438d894e5
3
+ size 5369778357
v2.5-chunks-roblox-filter/fandom-az.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff65e7bb5b8622feaf3c7de73522097167ad1ae0a238c786d4d42b810f3b9eba
3
+ size 5928051969
v2.5-chunks-roblox-filter/fandom-ba.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8626ab8ffa39b0ce96c5891093d78a9a25268edcab323d795258c65072cdcde0
3
+ size 5197485894
v2.5-chunks-roblox-filter/fandom-bb.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f9b4b594cfbbcb66e4f9d0190369f396dd0f0856dff715e2bf5c40620220313
3
+ size 3179725310
v2.5-chunks-roblox-filter/fandom-bc.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3bf5b3c63392cbd6903cd499892b15411ebba3ddfabbdc0309d3aa6dfc85a27
3
+ size 3585388360
v2.5-chunks-roblox-filter/fandom-bd.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2563cdc8bce7893bfcd230d04a7d30b63e1aaeefac353dbd3fb349dcc7176a3
3
+ size 3459115448
v2.5-chunks-roblox-filter/fandom-be.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f731690825e17d951ef9b06bf1e7ce4aa13ecbecb3c186a2598fb30d2b2ce56b
3
+ size 3321816281
v2.5-chunks-roblox-filter/fandom-bf.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90219016838c5829e7f6590e13d059cd1205488d72d780c5e577792b3a971a5e
3
+ size 2695687623
v2.5-chunks-roblox-filter/fandom-bg.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edc888422d39f3b0375a9ad1911330185c8ca26fd5d10e316d437376e08bd7fd
3
+ size 3499549822
v2.5-chunks-roblox-filter/fandom-bh.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03ca99944602eae8234559903eb7a503d77bb0d7b0804aea0b199d6ed5b89e88
3
+ size 3640070882
v2.5-chunks-roblox-filter/fandom-bi.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4539ba3eb4914fece398d4fbed2eb7e6c2556ff7135fed48d7ecbc7db007bff
3
+ size 4729760443
v2.5-chunks-roblox-filter/fandom-bj.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30efa8318134f32f8dccf9a3bc1f27280d42f82ca84b64757a265dfc55b7946e
3
+ size 3904301398
v2.5-chunks-roblox-filter/fandom-bk.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71cffbfd115c9a184b3114de259a3cfcc2374c5d3735490688901a2aaced6748
3
+ size 3460240622
v2.5-chunks-roblox-filter/fandom-bl.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e79228cb7a83307e274d186d587d2235ac8c187e50cbae53167fafe737bb0d5
3
+ size 3498847536
v2.5-chunks-roblox-filter/fandom-bm.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf8cada7d8b8a0b98f7de53a1c7c37b7ea0b9c4ce009856af5628eefb8a160cf
3
+ size 3488156067
v2.5-chunks-roblox-filter/fandom-bn.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f78061817f14d4e8ecbf2cfa63bde739f0caadfff8299bb70a62c333db1a6b8
3
+ size 3059357476
v2.5-chunks-roblox-filter/fandom-bo.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdfa5f2c3b82455af2933b820acb50b603781e661fb7f7a81e19dd92f9e8c179
3
+ size 2420305940