fix RAM issue
Don't add the binary to the dictionaries in `annotations`. Instead, use a copy of the dictionary that can be garbage-collected after each `yield`.
Changed: DUDE_loader.py (+2 −1)
--- a/DUDE_loader.py
+++ b/DUDE_loader.py
@@ -222,9 +222,10 @@ class DUDE(datasets.GeneratorBasedBuilder):
         annotations = [x for x in annotations if x["data_split"] == split]

         for i, a in enumerate(annotations):
-            a["data_split"] = split
             if a["docId"] in SKIP_DOC_IDS:
                 continue
+            a = dict(a)
+            a["data_split"] = split
             a["answers_page_bounding_boxes"] = parse_bbox(a["answers_page_bounding_boxes"])
             docpath = retrieve_doc(a["docId"])
             ocrpath = retrieve_OCR(a["docId"])