Upload scripts/stack_parser.py with huggingface_hub
Browse files- scripts/stack_parser.py +415 -0
scripts/stack_parser.py
ADDED
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import multiprocessing
|
2 |
+
import random
|
3 |
+
import traceback
|
4 |
+
from lxml import etree
|
5 |
+
import pathlib
|
6 |
+
import orjson
|
7 |
+
import tqdm
|
8 |
+
import typer
|
9 |
+
import urllib.parse
|
10 |
+
from sqlitedict import SqliteDict
|
11 |
+
|
12 |
+
# Typer CLI application; subcommands are registered below via @app.command().
app = typer.Typer()
|
13 |
+
|
14 |
+
|
15 |
+
def fast_iter(context, func, *args, **kwargs):
    """
    Stream over an lxml ``iterparse`` *context*, calling
    ``func(elem, *args, **kwargs)`` on each element, then freeing the element
    and its already-processed preceding siblings so memory stays flat on
    multi-gigabyte XML dumps.

    http://lxml.de/parsing.html#modifying-the-tree
    Based on Liza Daly's fast_iter
    http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
    See also http://effbot.org/zone/element-iterparse.htm
    """
    for event, elem in context:
        func(elem, *args, **kwargs)
        # It's safe to call clear() here because no descendants will be
        # accessed
        elem.clear()
        # Also eliminate now-empty references from the root node to elem
        for ancestor in elem.xpath("ancestor-or-self::*"):
            while ancestor.getprevious() is not None:
                del ancestor.getparent()[0]
    # Drop the parser context so its internal buffers can be reclaimed too.
    del context
|
32 |
+
|
33 |
+
|
34 |
+
# PostHistoryTypeId -> post field that revision rewrites.
# 1-3 are initial versions, 4-6 edits, 7-9 rollbacks; all carry the full
# new text of the field, so the latest one wins.
_HISTORY_FIELD = {
    1: "title", 2: "body", 3: "tags",
    4: "title", 5: "body", 6: "tags",
    7: "title", 8: "body", 9: "tags",
}

# PostHistoryTypeId -> (moderation flag, value) for close/delete/lock events.
_HISTORY_FLAG = {
    10: ("closed", True),
    11: ("closed", False),
    12: ("deleted", True),
    13: ("deleted", False),
    14: ("locked", True),
    15: ("locked", False),
}


def _clean_text(elem) -> str:
    """URL-decode a history row's Text attribute and normalize CRLF to LF."""
    return urllib.parse.unquote(elem.get("Text", "")).replace("\r\n", "\n")


@app.command()
def decadence(stack_folder: pathlib.Path, output_file: pathlib.Path):
    """Reconstruct one Stack Exchange site dump into a JSONL file of questions.

    Pass 1 replays ``PostHistory.xml`` to rebuild the latest title/body/tags
    (and closed/deleted/locked flags) of every post, spilled to a temporary
    SqliteDict so memory stays bounded. Empty-bodied and deleted posts are
    then purged. Pass 2 walks ``Posts.xml`` to collect question metadata,
    and pass 3 walks it again to attach reconstructed answers to their
    parent questions. The merged questions are written to *output_file*,
    one JSON object per line.

    Raises FileNotFoundError if PostHistory.xml or Posts.xml is missing.
    """
    post_history = stack_folder / "PostHistory.xml"
    if not post_history.exists():
        raise FileNotFoundError(post_history)
    print("Working on", stack_folder.name)
    sql_dict_posts = SqliteDict(
        f"tmp/{stack_folder.name}.hist.tmp.sqlite", flag="n", tablename="dict_hist"
    )
    try:
        with tqdm.tqdm() as pbar:
            ctx = etree.iterparse(post_history, tag="row")

            def process_history_row(elem):
                p_type = int(elem.get("PostHistoryTypeId"))
                pid: str = elem.get("PostId")
                if p_type in _HISTORY_FIELD:
                    if pid in sql_dict_posts:
                        post_data = sql_dict_posts[pid]
                    else:
                        post_data = {}
                        if p_type >= 4:
                            # An edit/rollback arrived before any initial
                            # revision: this dump is likely a partial.
                            post_data["partial"] = True
                    post_data[_HISTORY_FIELD[p_type]] = _clean_text(elem)
                    # SqliteDict only persists on assignment, not mutation.
                    sql_dict_posts[pid] = post_data
                    pbar.update(1)
                elif p_type == 35:
                    # Post merge/redirect marker: diagnostic logging only.
                    if pid in sql_dict_posts:
                        post_data = sql_dict_posts[pid]
                    else:
                        print(
                            f"[W] {pid}, {stack_folder.name} requested to be redirected but redirect doesn't seem to exist?"
                        )
                        post_data = {}
                    if not post_data.get("partial", False):
                        print(f"[I] {pid}, {stack_folder.name} Not partial?")
                        return
                    print(post_data)
                elif p_type in _HISTORY_FLAG:
                    if pid not in sql_dict_posts:
                        return
                    post_data = sql_dict_posts[pid]
                    if not post_data:
                        raise Exception
                    flag, value = _HISTORY_FLAG[p_type]
                    post_data[flag] = value
                    sql_dict_posts[pid] = post_data
                    pbar.update(1)
                # Periodically flush SqliteDict's write queue to disk.
                if pbar.n % 5000 == 0 and pbar.n != 0:
                    sql_dict_posts.commit()

            fast_iter(ctx, process_history_row)
    except Exception as e:
        print("[!] ERR: ", traceback.format_exception(e))

    # Purge posts with no usable body and posts that ended up deleted.
    flagged = set()
    for k, v in sql_dict_posts.items():
        # BUG FIX: v.get("body") is None for posts that never received a
        # body revision; the original crashed here with AttributeError on
        # .strip(). Treat a missing body the same as an empty one.
        if not (v.get("body") or "").strip():
            flagged.add(k)
        if v.get("deleted", False):
            flagged.add(k)
    for fg in flagged:
        del sql_dict_posts[fg]
    sql_dict_posts.sync()
    print("Reconstruct done!")

    posts = stack_folder / "Posts.xml"
    if not posts.exists():
        raise FileNotFoundError(posts)
    main_qn_posts = {}
    try:
        # Pass 2: prepare all the main question posts (PostTypeId == 1).
        with tqdm.tqdm() as bar:
            ctx2 = etree.iterparse(posts, tag="row")

            def process_question_row(element):
                pid = element.get("Id")
                p_type = int(element.get("PostTypeId"))

                if p_type == 1:
                    if pid not in sql_dict_posts:
                        # Question listed in Posts.xml but absent from the
                        # reconstructed history; keep a stub for bookkeeping.
                        main_qn_posts[pid] = {"Stub": True}
                        print(
                            f"[!] Question: {int(element.get('Id'))} {stack_folder.name} does not exist but referanced?"
                        )
                        return
                    qn = sql_dict_posts[pid]
                    qn["Stub"] = False
                    qn["Id"] = pid
                    qn["CreationDate"] = element.get("CreationDate")
                    qn["Score"] = int(element.get("Score"))
                    accepted = element.get("AcceptedAnswerId")
                    qn["Accepted"] = int(accepted) if accepted else None
                    # Missing count attributes default to 0.
                    qn["Counts"] = {
                        "Views": int(element.get("ViewCount") or 0),
                        "Answers": int(element.get("AnswerCount") or 0),
                        "Comments": int(element.get("CommentCount") or 0),
                    }
                    main_qn_posts[pid] = qn
                    bar.update(1)

            fast_iter(ctx2, process_question_row)
    except Exception as e:
        print("[!] ERR: ", traceback.format_exception(e))

    print("2nd Pass Posts Done")
    # Pass 3: match all the answer posts (PostTypeId == 2) to their parents.
    try:
        with tqdm.tqdm() as bar:
            ctx3 = etree.iterparse(posts, tag="row")

            def process_answer_row(element):
                pid = element.get("Id")
                p_type = int(element.get("PostTypeId"))
                if p_type != 2:
                    return
                parent_id = element.get("ParentId")
                if parent_id not in main_qn_posts:
                    print(
                        f"[!] Answer: {int(element.get('Id'))} {stack_folder.name} has no parent attached to it!"
                    )
                    return
                answers = main_qn_posts[parent_id].setdefault("answers", [])
                rec_answer = sql_dict_posts[pid] if pid in sql_dict_posts else None
                if rec_answer is None:
                    print(
                        f"[!] Answer: {int(element.get('Id'))} {stack_folder.name} does not have a PostHistory.xml Assoc. with it!"
                    )
                    main_qn_posts[parent_id]["answers"] = answers
                    return
                rec_answer["Id"] = int(element.get("Id"))
                rec_answer["Score"] = int(element.get("Score") or 0)
                rec_answer["Counts"] = {
                    "Views": int(element.get("ViewCount") or 0),
                    "Comments": int(element.get("CommentCount") or 0),
                }
                answers.append(rec_answer)
                main_qn_posts[parent_id]["answers"] = answers

            fast_iter(ctx3, process_answer_row)
    except Exception as e:
        print("[!] ERR: ", traceback.format_exception(e))
    print("2nd Pass done!")

    # Emit one JSON object per question, then drop the temporary database.
    try:
        with open(output_file, "wb") as f:
            for qn in main_qn_posts.values():
                f.write(orjson.dumps(qn) + b"\n")
        sql_dict_posts.close()
        pathlib.Path(f"tmp/{stack_folder.name}.hist.tmp.sqlite").unlink()
    except Exception as e:
        print("[!] ERR: ", traceback.format_exception(e))
|
280 |
+
|
281 |
+
|
282 |
+
@app.command()
def convo_processor(base_file: pathlib.Path, qa_file: pathlib.Path):
    """Convert a parsed-questions JSONL file into conversation-format JSONL.

    Reads question records from *base_file* (one JSON object per line, as
    produced by ``decadence``) and writes one user/assistant conversation
    per answer with a non-negative score to *qa_file*, with question/answer
    scores recorded under ``meta``.
    """
    with open(qa_file, "wb") as fw:
        with open(base_file, "rb") as f:
            for line in f:
                data = orjson.loads(line)
                # Stubs and partially reconstructed posts lack body/title.
                if not data.get("body") or not data.get("title"):
                    continue
                if data["body"].lower().startswith(data["title"].lower()):
                    # Body repeats the title; strip the duplicated prefix.
                    question = f'{data["title"].strip()}\n\n{data["body"][len(data["title"]) :].strip()}'
                else:
                    question = f'{data["title"].strip()}\n\n{data["body"].strip()}'
                for answer in data.get("answers") or []:
                    if "Score" not in answer:
                        print("no score?", data)
                        continue
                    if answer["Score"] < 0:
                        continue
                    # BUG FIX: the assistant message must be the answer's
                    # text. The original emitted the whole answer dict
                    # (metadata and all) as the message.
                    answer_body = answer.get("body")
                    if not answer_body:
                        continue
                    record = {
                        "conversation": [
                            {"sender": "user", "message": question},
                            {"sender": "assistant", "message": answer_body},
                        ],
                        "meta": {
                            "q_score": data["Score"],
                            "a_score": answer["Score"],
                            "s_score": data["Score"] + answer["Score"],
                        },
                    }
                    fw.write(orjson.dumps(record) + b"\n")
|
313 |
+
|
314 |
+
|
315 |
+
@app.command()
def qa_processor(base_file: pathlib.Path, qa_file: pathlib.Path, formats: pathlib.Path):
    """Render question/answer pairs from *base_file* into prompt templates.

    *formats* is a JSON file holding a list of ``str.format`` templates with
    ``{question}`` and ``{answer}`` placeholders; one is chosen at random
    for each emitted pair. Output goes to *qa_file* as JSON lines with the
    rendered ``text`` plus question/answer scores under ``meta``.
    """
    question_formats = orjson.loads(formats.read_bytes())
    with open(qa_file, "wb") as fw:
        with open(base_file, "rb") as f:
            for line in f:
                data = orjson.loads(line)
                # Stubs and partially reconstructed posts lack body/title.
                if not data.get("body") or not data.get("title"):
                    continue
                if data["body"].lower().startswith(data["title"].lower()):
                    # Body repeats the title; strip the duplicated prefix.
                    question = f'{data["title"].strip()}\n\n{data["body"][len(data["title"]) :].strip()}'
                else:
                    question = f'{data["title"].strip()}\n\n{data["body"].strip()}'
                for answer in data.get("answers") or []:
                    # Consistency fix: guard missing Score/body the way
                    # convo_processor does, instead of raising KeyError.
                    if "Score" not in answer:
                        print("no score?", data)
                        continue
                    if answer["Score"] < 0:
                        continue
                    if not answer.get("body"):
                        continue
                    template = random.choice(question_formats)
                    rendered = template.format(
                        question=question, answer=answer["body"]
                    )
                    record = {
                        "text": rendered,
                        "meta": {
                            "q_score": data["Score"],
                            "a_score": answer["Score"],
                        },
                    }
                    fw.write(orjson.dumps(record) + b"\n")
|
342 |
+
|
343 |
+
def err_handler(e):
    """Print the traceback of *e*; used as a multiprocessing error_callback."""
    traceback.print_exception(type(e), e, e.__traceback__)
|
345 |
+
|
346 |
+
@app.command()
def convo_stackstack(folder: pathlib.Path):
    """Run ``convo_processor`` over every ``*.jsonl`` file in *folder*.

    Work is fanned out across a 64-process pool; outputs are written to
    ``convo/<stem>_convo.jsonl``. Worker failures are reported through
    ``err_handler``.
    """
    with multiprocessing.Pool(processes=64) as pool:
        pending = []
        for item in folder.iterdir():
            if item.is_file() and item.suffix.endswith("jsonl"):
                out_path = pathlib.Path("convo") / f"{item.stem}_convo.jsonl"
                pending.append(
                    pool.apply_async(
                        convo_processor,
                        (item, out_path),
                        error_callback=err_handler,
                    )
                )
        for task in pending:
            task.wait()
        pool.close()
        # BUG FIX: the original read `pool.join` (attribute access, never
        # called), so the pool was never actually joined.
        pool.join()
|
367 |
+
|
368 |
+
@app.command()
def qa_stackstack(folder: pathlib.Path):
    """Run ``qa_processor`` over every ``*.jsonl`` file in *folder*.

    Work is fanned out across a 64-process pool; outputs are written to
    ``qa/<stem>_qa.jsonl`` using the templates in ``staccato_format.json``.
    Worker failures are reported through ``err_handler``.
    """
    with multiprocessing.Pool(processes=64) as pool:
        pending = []
        for entry in folder.iterdir():
            if not (entry.is_file() and entry.suffix.endswith("jsonl")):
                continue
            destination = pathlib.Path("qa") / f"{entry.stem}_qa.jsonl"
            task = pool.apply_async(
                qa_processor,
                (entry, destination, pathlib.Path("staccato_format.json")),
                error_callback=err_handler,
            )
            pending.append(task)
        for task in pending:
            task.wait()
        pool.close()
        pool.join()
|
390 |
+
|
391 |
+
|
392 |
+
@app.command()
def stackstack(folder: pathlib.Path):
    """Run ``decadence`` over every non-meta site dump folder in *folder*.

    Work is fanned out across a 64-process pool; each site's questions are
    written to ``parsed/<name>_raw.jsonl``.
    """
    with multiprocessing.Pool(processes=64) as pool:
        pending = []
        for item in folder.iterdir():
            if item.is_dir() and "meta." not in item.name:
                pending.append(
                    pool.apply_async(
                        decadence,
                        (
                            item,
                            pathlib.Path("parsed") / f"{item.name}_raw.jsonl",
                        ),
                        # Consistency fix: report worker failures like the
                        # other *stackstack commands do — .wait() alone
                        # never re-raises, so errors were silently dropped.
                        error_callback=err_handler,
                    )
                )
        for task in pending:
            task.wait()
        pool.close()
        pool.join()
|
413 |
+
|
414 |
+
|
415 |
+
if __name__ == "__main__":
    # Guard the CLI entry point: multiprocessing workers re-import this
    # module under the spawn start method, and an unguarded app() would
    # re-run argument parsing in every worker.
    app()
|