changed tokenization

ref_seg_ger.py  CHANGED  (+12 -12)
@@ -62,7 +62,7 @@ _LABELS = [
 
 _FEATURES = datasets.Features(
     {
-
+        "id": datasets.Value("string"),
         "tokens": datasets.Sequence(datasets.Value("string")),
         # "attention_mask": datasets.Sequence(datasets.Value("int64")),
         # "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
@@ -81,16 +81,16 @@ _FEATURES = datasets.Features(
 )
 
 
-def load_image(image_path, size=None):
-    image = Image.open(image_path).convert("RGB")
-    w, h = image.size
-    if size is not None:
-        # resize image
-        image = image.resize((size, size))
-    image = np.asarray(image)
-    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
-    image = image.transpose(2, 0, 1)  # move channels to first dimension
-    return image, (w, h)
+# def load_image(image_path, size=None):
+#     image = Image.open(image_path).convert("RGB")
+#     w, h = image.size
+#     if size is not None:
+#         # resize image
+#         image = image.resize((size, size))
+#     image = np.asarray(image)
+#     image = image[:, :, ::-1]  # flip color channels from RGB to BGR
+#     image = image.transpose(2, 0, 1)  # move channels to first dimension
+#     return image, (w, h)
 
 
 # def normalize_bbox(bbox, size):
@@ -303,7 +303,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
             # print(split_ids, len(split_ids))
 
             yield key, {
-
+                "id": f"{os.path.basename(f)}",
                 'tokens': clean_input_ids,
                 # 'attention_mask': [1] * len(chunk_ids),
                 # "bbox": split_bboxes,
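For reference, here is the helper that this commit disables, restored to runnable form. The imports are assumptions inferred from the body (PIL's Image and numpy as np); the actual import block sits outside this diff.

from PIL import Image
import numpy as np


def load_image(image_path, size=None):
    # Load the image and normalize to 3-channel RGB.
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    if size is not None:
        # Resize to a square of side `size`.
        image = image.resize((size, size))
    image = np.asarray(image)
    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
    image = image.transpose(2, 0, 1)  # move channels to first dimension
    return image, (w, h)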
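Downstream, the net effect of the schema and yield changes is that every example now carries a string "id" (the basename of its source file) next to its token sequence. A minimal consumer sketch, assuming a datasets version that still supports loading a local builder script and assuming a "train" split exists:

import datasets

# Load the builder script changed in this commit (path and split are assumptions).
ds = datasets.load_dataset("ref_seg_ger.py", split="train")

example = ds[0]
print(example["id"])           # basename of the source file, per os.path.basename(f)
print(example["tokens"][:10])  # token sequence, present before and after this commit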