|
import datasets |
|
import json |
|
from PIL import Image |
|
|
|
|
|
def train_data_format(json_to_dict: list) -> list:
    """Reshape raw annotation records into LayoutLM-style example dicts.

    Each input item is expected to look like
    ``{"file_name": <image path>, "annotations": [{"text", "box", "label"}, ...]}``
    — TODO confirm against the producer of the annotation JSON.

    Args:
        json_to_dict: parsed annotation records (one per document image).

    Returns:
        A list of dicts with keys ``id``, ``image``, ``tokens``,
        ``bboxes`` and ``ner_tags``, one per input record.
    """
    final_list = []
    # ids are 1-based to preserve the original counter behavior
    for count, item in enumerate(json_to_dict, start=1):
        annotations = item["annotations"]
        final_list.append(
            {
                "id": count,
                # Decode eagerly and normalize to RGB so grayscale/paletted
                # images come out with a uniform mode.
                "image": Image.open(item["file_name"]).convert("RGB"),
                "tokens": [cont["text"] for cont in annotations],
                "bboxes": [cont["box"] for cont in annotations],
                "ner_tags": [cont["label"] for cont in annotations],
            }
        )
    return final_list
|
|
|
|
|
def read_json(json_path: str) -> dict:
    """Parse the JSON file at *json_path* and return the decoded object."""
    with open(json_path, "r") as handle:
        return json.load(handle)
|
|
|
|
|
class MyDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for document token-classification examples.

    Reads a local JSON annotation file, loads each referenced document
    image, and yields (tokens, bboxes, ner_tags) examples suitable for
    LayoutLM-style training.
    """

    def _info(self):
        """Declare the dataset's feature schema."""
        # NOTE(review): "id" is declared as a string here, but
        # train_data_format() fills it with an int counter — confirm the
        # datasets library casts this, or align the two types.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "image": datasets.Image(),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # One 4-ish element box per token; presumably
                    # pixel/normalized coordinates — verify against the
                    # annotation producer.
                    "bboxes": datasets.Sequence(
                        datasets.Sequence(datasets.Value("int32"))
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            num_classes=3,
                            names=["Other", "Patient_name", "Patient_address"],
                        )
                    ),
                }
            )
        )

    def _split_generators(self, dl_manager):
        """Define the dataset splits and the file each one reads.

        NOTE(review): both TRAIN and TEST point at the same annotation
        file ("Training_layoutLMV3.json"), so the two splits are
        identical — looks like a placeholder; point TEST at a held-out
        file once one exists.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": "Training_layoutLMV3.json",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": "Training_layoutLMV3.json",
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        # Parse the annotation JSON, reshape it into feature dicts, and
        # yield (key, example) pairs as the datasets API expects.
        for id_, row in enumerate(train_data_format(read_json(filepath))):
            yield id_, row
|
|