fix binary memory leakage
Browse files- DUDE_imdb_loader.py +1 -1
- DUDE_loader.py +2 -1
DUDE_imdb_loader.py
CHANGED
@@ -363,7 +363,7 @@ if __name__ == "__main__":
     save_json(documents_ocr_filename, documents_ocr_info)

     imdb = create_imdb_from_json(
-        dataset[split],
+        dataset[split],  # .select(split_indices),
         documents_metadata=documents_metadata,
         documents_ocr_info=documents_ocr_info,
         split=split,
DUDE_loader.py
CHANGED
@@ -222,9 +222,10 @@ class DUDE(datasets.GeneratorBasedBuilder):
        annotations = [x for x in annotations if x["data_split"] == split]

        for i, a in enumerate(annotations):
-            a["data_split"] = split
            if a["docId"] in SKIP_DOC_IDS:
                continue
+            a = dict(a)
+            a["data_split"] = split
            a["answers_page_bounding_boxes"] = parse_bbox(a["answers_page_bounding_boxes"])
            docpath = retrieve_doc(a["docId"])
            ocrpath = retrieve_OCR(a["docId"])