from typing import Any, Callable, Optional, List

import torch
from transformers import PreTrainedTokenizer
from torch.utils.data import Dataset
from tqdm import tqdm
import json
import os
from PIL import Image
from univa.utils.prompter import Prompter
import numpy as np
from einops import rearrange
import random
from univa.utils.constant import SPACIAL_TOKEN, GENERATE_TOKEN

class LlavaDataset(Dataset):
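    """LLaVA-style conversation dataset.

    Each sample interleaves text tokens with repeated image placeholder tokens
    (wrapped in image begin/end tokens) and, for generation samples, returns the
    target image under `generated_image`. `data_txt` lists "<image_root>,<json_file>"
    pairs, one per line; see `_load_data` for the expected JSON schema.
    """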
    def __init__(
        self,
        dataset_type: str,
        data_txt: str,
        tokenizer: PreTrainedTokenizer,
        prompter: Prompter,
        image_processor: Callable,
        processor: Optional[Callable] = None,
        min_pixels: int = 384*384, 
        max_pixels: int = 384*384, 
        image_token_length: int = 729,
        only_generated_task: bool = False,
        drop_prompt_rate: float = 0.2,
    ):
        assert dataset_type == 'llava'
        with open(data_txt, "r") as f:
            self.datasets = [line.strip() for line in f.readlines()]

        self.data = []
        self._load_data()
        self.tokenizer = tokenizer
        self.prompter = prompter
        self.image_token_length = image_token_length
        self.image_token = SPACIAL_TOKEN[dataset_type]['image_token']
        self.image_begin_token = SPACIAL_TOKEN[dataset_type]['image_begin_token']
        self.image_end_token = SPACIAL_TOKEN[dataset_type]['image_end_token']
        self.generated_image_token = GENERATE_TOKEN
        self.image_processor = image_processor

        self.only_generated_task = only_generated_task  # For denoiser training
        self.drop_prompt_rate = drop_prompt_rate
        if self.drop_prompt_rate > 0:
            assert self.only_generated_task, (
                "`drop_prompt_rate` > 0 is only supported when `only_generated_task` is enabled"
            )

        # Add the image token to the tokenizer vocabulary if it is not already there.
        if self.image_token not in self.tokenizer.get_vocab():
            self.tokenizer.add_special_tokens(
                {"additional_special_tokens": [self.image_token]}
            )
        self.image_token_id = self.tokenizer.convert_tokens_to_ids(self.image_token)

        self.image_begin_token_id = self.tokenizer.convert_tokens_to_ids(
            self.image_begin_token
        )
        assert isinstance(self.image_begin_token_id, int), (
            f"tokenizer miss image begin token `{self.image_begin_token}`"
        )
        self.image_end_token_id = self.tokenizer.convert_tokens_to_ids(
            self.image_end_token
        )
        assert isinstance(self.image_end_token_id, int), (
            f"tokenizer miss image end token `{self.image_end_token}`"
        )

    def _load_data(self):
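        # Each line of `data_txt` is "<image_root>,<json_file>". Every JSON entry is
        # expected to look roughly like this (illustrative example, field names taken
        # from the code below):
        #   {"image": "0001.jpg",                      # str or list of str
        #    "conversations": [
        #        {"from": "human", "value": "<image>\nDescribe the image."},
        #        {"from": "gpt",   "value": "A dog running on a beach."}]}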
        for dataset in self.datasets:
            image_root, json_file = dataset.split(",")

            # Load json file
            with open(json_file, "r") as f:
                data = json.load(f)

            dataset_data = []
            for line in tqdm(data):
                # Ensure `image` is a list
                if isinstance(line["image"], str):
                    line["image"] = [line["image"]]
                assert isinstance(line["image"], list), (
                    "`image` must be a str or a list."
                )

                # Convert image path to absolute path
                line["image"] = [
                    os.path.join(image_root, image_path) for image_path in line["image"]
                ]

                dataset_data.append(line)

            print(f"Load {len(dataset_data)} data from {json_file}.")
            self.data.extend(dataset_data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        try:
            data: Any = self.data[idx]

            # Reformat the conversation to the format of prompter
            conversations = []
            prompt = ""
            for item in data["conversations"]:
                if item["from"] == "human":
                    role = self.prompter.user_role
                elif item["from"] == "gpt":
                    role = self.prompter.assistant_role
                else:
                    raise ValueError(f"Unknown role: {item['from']}")
                conversations.append({"from": role, "value": item["value"]})
            assert prompt != ""

            # Make prompt
            drop_condition = False
            if self.only_generated_task:
                # Drop the prompt with probability `drop_prompt_rate` and replace it
                # with a generic "Generate an image." instruction (prompt dropping,
                # e.g. for classifier-free-guidance-style denoiser training).
                if random.random() > self.drop_prompt_rate:  # Keep the original prompt
                    prompt_list = self.prompter.get_train_prompt(conversations)
                else:
                    drop_condition = True
                    # Drop the prompt
                    prompt_list = [
                        {
                            "from": self.prompter.system_role,
                            "value": "You are a helpful assistant.",
                        },
                        {
                            "from": self.prompter.user_role,
                            "value": "Generate an image.",
                        },
                        {
                            "from": self.prompter.assistant_role,
                            "value": self.generated_image_token,
                        },
                    ]
                    prompt_list = self.prompter.get_train_prompt(prompt_list)
            else:
                prompt_list = self.prompter.get_train_prompt(conversations)
                
            input_ids = []
            labels = []
            has_generated_image = False
            for item in prompt_list:
                item["prompt"] = item["prompt"].replace('<image>', self.image_token)
                if self.generated_image_token in item["prompt"]:  # This turn asks for image generation
                    assert item["from"] == self.prompter.assistant_role, (
                        "Generated image token must appear in an assistant turn"
                    )
                    assert (
                        f"{self.generated_image_token}{self.prompter.eos_token}"
                        in item["prompt"]
                    ), "Generated image token must be at the end of the prompt"

                    # Replace "<generated_image><eos>" with the image begin token (dropping the eos token)
                    item["prompt"] = item["prompt"].replace(
                        f"{self.generated_image_token}{self.prompter.eos_token}",
                        self.image_begin_token,
                    )
                    has_generated_image = True

                tokenized_item = self.tokenizer(
                    item["prompt"],
                    return_tensors="pt",
                    truncation=False,
                )
                if item["is_labels"]:  # If this prompt is labels
                    labels.append(tokenized_item.input_ids)
                else:
                    labels.append(torch.full_like(tokenized_item.input_ids, -100))
                input_ids.append(tokenized_item.input_ids)

            if (
                self.only_generated_task and not has_generated_image
            ):  # For denoiser training every sample must end with a generated image
                raise ValueError(
                    "`only_generated_task` is enabled, but this prompt does not contain "
                    f"the generated image token: {prompt_list[0]['prompt']}"
                )

            input_ids = torch.cat(input_ids, dim=1)
            labels = torch.cat(labels, dim=1)

            # Load images
            if has_generated_image:
                if not drop_condition:
                    image_slice = data["image"][:-1]
                else:
                    image_slice = []
            else:
                image_slice = data["image"]
            image_dict = self._load_image(
                image_slice,
                image_processor=self.image_processor,
                image_token_lengths=self.image_token_length,
            )
            image_token_lengths = image_dict['image_token_lengths']
            pixel_values = image_dict['pixel_values']
            image_grid_thw = image_dict['image_grid_thw']


            # Repeat the image token to the length of image_token_length 
            # and record the position of image tokens.
            input_ids, labels, image_position = self._process_image_token(
                input_ids,
                labels=labels,
                image_token_id=self.image_token_id,
                image_begin_token_id=self.image_begin_token_id,
                image_end_token_id=self.image_end_token_id,
                image_token_lengths=image_token_lengths,
            )

            return_data = {
                "input_ids": input_ids,
                "labels": labels,
                "pixel_values": pixel_values,
                "image_position": image_position,
                "image_grid_thw": image_grid_thw, 
                "prompt": [prompt],
            }

            if has_generated_image: # If this item is a generation task
                image = Image.open(data["image"][-1]).convert("RGB")
                image_tensor = torch.tensor(np.array(image)) / 255.0  # scale to 0-1
                image_tensor = rearrange(image_tensor, "h w c -> c h w")
                return_data["generated_image"] = image_tensor

            return return_data
        except Exception as e:
            # Fall back to a random sample so a single corrupt item does not stop training.
            print(f"Error while loading sample {idx}: {e}")
            return self.__getitem__(random.randint(0, len(self) - 1))

    @staticmethod
    def _load_image(
        image_slice: List[str],
        max_pixels: int = 448*448,  
        min_pixels: int = 448*448, 
        processor: Callable = None, 
        image_processor: Callable = None, 
        image_token_lengths: int = 729, 
        image_token: str = '<image>', 
    ):
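        # Returns a dict with:
        #   pixel_values:        (num_images, c, h, w) tensor, or an empty list if no images
        #   image_grid_thw:      always [] for this dataset type
        #   image_token_lengths: one entry per image, each equal to `image_token_lengths`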
        # images tensor shape is (b, c, h, w)
        images = []
        # The caller has already removed the last (to-be-generated) image from image_slice.
        for image_path in image_slice:
            image = Image.open(image_path).convert("RGB")
            image = image_processor(
                image, return_tensors="pt"
            ).pixel_values
            images.append(image)
        if len(images) > 0:
            images = torch.cat(images)
        image_token_lengths = len(images) * [image_token_lengths]
        return {'pixel_values': images, 'image_grid_thw': [], 'image_token_lengths': image_token_lengths}
    
    @staticmethod
    def _process_image_token(
        input_ids: torch.Tensor,
        image_token_id: int,
        image_begin_token_id: int,
        image_end_token_id: int,
        image_token_lengths: List[int],
        labels: Optional[torch.Tensor] = None,
    ):
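        # Illustrative example: with image_token_length == 3, the sequence
        #   [T1, <image>, T2]
        # becomes
        #   [T1, <image_begin>, <image>, <image>, <image>, <image_end>, T2]
        # and image_position records the index of the first repeated <image> token.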
        # Find the indices of the image token
        image_token_indices = (input_ids == image_token_id).nonzero(as_tuple=True)
        image_position = []
        offset = 0
        cur_i = 0
        if isinstance(image_token_lengths, int):
            image_token_lengths = [image_token_lengths] * len(image_token_indices[1])
        for idx in image_token_indices[1]:
            image_token_length = image_token_lengths[cur_i]
            adjusted_idx = idx + offset
            assert input_ids[0, adjusted_idx] == image_token_id

            # Add image begin and end token
            input_ids = torch.cat(
                [
                    input_ids[:, :adjusted_idx],
                    input_ids.new_full(
                        (1, 1), image_begin_token_id
                    ),  # image begin token
                    input_ids.new_full(
                        (1, image_token_length), image_token_id
                    ),  # Repeat the image token to the length of image_token_length
                    input_ids.new_full((1, 1), image_end_token_id),  # image end token
                    input_ids[:, adjusted_idx + 1 :],
                ],
                dim=1,
            )
            if labels is not None:
                labels = torch.cat(
                    [
                        labels[:, :adjusted_idx],
                        labels.new_full(
                            (1, 1), image_begin_token_id
                        ),  # Make begin token as label
                        labels.new_full((1, image_token_length), -100),
                        labels.new_full((1, 1), -100),
                        labels[:, adjusted_idx + 1 :],
                    ],
                    dim=1,
                )

            adjusted_idx += 1  # skip the image begin token
            image_position.append(adjusted_idx.item())
            offset += image_token_length - 1
            offset += 2  # begin and end token

        return input_ids, labels, image_position
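

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the training entrypoint).
    # The checkpoint names and file paths below are placeholders; the tokenizer is
    # expected to already contain the image begin/end special tokens asserted in
    # __init__, and `Prompter()` stands in for the repo's prompter for your base model.
    from transformers import AutoTokenizer, CLIPImageProcessor

    tokenizer = AutoTokenizer.from_pretrained("path/to/llm")              # placeholder
    image_processor = CLIPImageProcessor.from_pretrained("path/to/vit")   # placeholder
    prompter = Prompter()                                                 # placeholder

    dataset = LlavaDataset(
        dataset_type="llava",
        data_txt="path/to/data.txt",  # each line: "<image_root>,<json_file>"
        tokenizer=tokenizer,
        prompter=prompter,
        image_processor=image_processor,
        image_token_length=729,
        only_generated_task=False,
        drop_prompt_rate=0.0,
    )
    sample = dataset[0]
    print(sample["input_ids"].shape, len(sample["image_position"]))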