machuofan committed
Commit b97419e · 1 Parent(s): 735672d
Files changed (5)
  1. app.py +30 -43
  2. i2t.py +0 -192
  3. inference_i2t.py +0 -110
  4. t2i.py +0 -224
  5. t2i_new.py +0 -243
app.py CHANGED
@@ -1,33 +1,26 @@
1
  import time
2
- from threading import Thread
3
-
4
- import gradio as gr
5
  import torch
6
- import PIL
7
  from PIL import Image
8
- import torch
9
- from transformers import AutoTokenizer, AutoModelForCausalLM
10
- import os
11
  from tqdm import tqdm
12
- from helpers import sample, expand2square
 
 
 
13
 
14
- # from transformers import AutoProcessor, LlavaForConditionalGeneration
15
- from transformers import TextIteratorStreamer
16
- from conversation import conv_templates
17
  from model import *
18
  from unitok.config import Args
19
  from unitok.model import UniTok
20
- from mm_utils import tokenizer_image_token, get_model_name_from_path
21
- from torchvision import transforms
22
-
23
- PILtransform = transforms.ToPILImage()
24
 
 
25
 
26
- # import spaces
27
- # import os
28
- # os.system("pip uninstall -y gradio")
29
- # os.system("pip install gradio==4.44.1")
30
- # os.system("pip install gradio_client==1.3.0")
31
 
32
 
33
  IMAGE_TOKEN_INDEX=-200
@@ -47,18 +40,20 @@ CSS ="""
47
 
48
  title_html = """
49
  <div style="display: flex; flex-direction: column; align-items: center; gap: 10px;">
50
- <h1 style="margin: 0; line-height: 1; text-align: center;"> Liquid: Language Models are Scalable Multi-modal <br> Generators via Unified Understanding and Generation</h1>
51
  </div>
52
  """
53
 
54
  links_html = f"""
55
- <center><font size=3><a href='https://foundationvision.github.io/Liquid/'>Liquid</a> has been open-sourced on <a href='https://huggingface.co/Junfeng5/Liquid_V1_7B'>😊 Huggingface</a> and <a href='https://github.com/FoundationVision/Liquid'>🌟 GitHub</a>. If you find Liquid useful, a like❤️ or a star🌟 would be appreciated.</font></center>
56
  """
57
 
58
  introduction = f"""
59
- Liquid explores the potential of a single LLM as a multimodal generator and its scaling laws. It achieves the level of diffusion models in visual generation and discovers the mutual enhancement between understanding and generation. More details can be found on the project <a href='https://foundationvision.github.io/Liquid/'> homepage</a> and in the <a href='https://arxiv.org/abs/2412.04332'> paper</a>. """
 
60
 
61
- ckpt = torch.load(r'D:\projects\liquid_app\UniTok\UniTok_weights\unitok_tokenizer\unitok_tokenizer.pth', map_location='cpu')
 
62
  vae_cfg = Args()
63
  vae_cfg.load_state_dict(ckpt['args'])
64
  vq_model = UniTok(vae_cfg)
@@ -66,13 +61,12 @@ vq_model.load_state_dict(ckpt['trainer']['unitok'])
66
  vq_model.to('cuda')
67
  vq_model.eval()
68
 
 
 
69
 
70
- tokenizer = AutoTokenizer.from_pretrained(r'C:\debug_ckpts\unitok_mllm', padding_side='left')
71
- vqllm = MiniGeminiLlamaForCausalLM.from_pretrained(
72
- r'C:\debug_ckpts\unitok_mllm',
73
- attn_implementation='flash_attention_2',
74
- torch_dtype=torch.bfloat16
75
- ).to('cuda')
76
  num_codebooks = vae_cfg.num_codebooks
77
 
78
  # @spaces.GPU
@@ -149,14 +143,12 @@ def bot_streaming_I2T(message, history):
149
  yield generated_text
150
 
151
 
152
-
153
  def show_gallery(images):
154
  gallery = gr.Gallery(images, label="Gallery", columns=4, height="auto",preview=True,scale=0.05) # set a two-row, two-column layout
155
  return gallery
156
 
157
  # @spaces.GPU
158
  def bot_streaming_T2I(message, history,guidance_scale, temperature, top_K, top_P):
159
-
160
  global stop_flag
161
  stop_flag = True
162
  time.sleep(0.2)
@@ -175,9 +167,7 @@ def bot_streaming_T2I(message, history,guidance_scale, temperature, top_K, top_P
175
  model_inputs = tokenizer(text_inputs + uncondition_text_inputs, return_tensors="pt", padding=True).to('cuda')
176
  else:
177
  model_inputs = tokenizer(text_inputs, return_tensors="pt", padding=True).to('cuda')
178
- model_kwargs = {'attention_mask':model_inputs.pop('attention_mask'),
179
- 'use_cache': True
180
- }
181
  input_ids = model_inputs.pop('input_ids')
182
  batch_size, cur_len = input_ids.shape
183
  if "inputs_embeds" in model_kwargs:
@@ -239,7 +229,6 @@ def bot_streaming_T2I(message, history,guidance_scale, temperature, top_K, top_P
239
  del model_kwargs
240
  # image_vq_id = input_ids[:,prompt_length:prompt_length+256]-ori_vocabe_size
241
  image_vq_id = torch.stack(pred_tokens, dim=-1)[:ori_batchsize]
242
-
243
 
244
  generated_image_list = []
245
  rec_images = vq_model.idx_to_img(image_vq_id)
@@ -284,10 +273,10 @@ with gr.Blocks(fill_height=True) as demo:
284
  ["Portrait of an asian woman. She has pink violet hair style with modern complex hairdressing. The background is dark with cyberpunk neon lights. Inspired by Cyberpunk 2077 and Blade Runner. Ultra realistic picture. To capture the image, you will use a fullframe DSLR or mirrorless camera with a highresolution sensor, an aperture of f2.8 or wider, and a shutter speed of 1500 second or faster. You will use natural light and reflectors to create a balanced and welllit image, and will experiment with different angles and compositions to create the most i",5.0, 0.9,4096,0.99],
285
  ["female character fantasy world, for fantasy story, protagonist, interesting and detailed clothes, beautiful, medieval fantasy cinematic shot photo taken by canon, photo taken by fuji, photo taken by kodak incredibly detailed, sharpen, details professional lighting , film lighting 350mm lightroom cinematography, hyper realism, cinematic, film quality",5.0, 0.9,4096,0.99],
286
  ["strawberries splashing, swirling liquid, realism, octane render, raytracing",5.0, 0.9,4096,0.99],
287
- ["hedgehog face, floating in space, wearing space suit no helmet, cinematic, 50mm f1.8, unreal engine 5",5.0, 0.9,4096,0.99],
288
- ["artificial intelligence, revolution, publishing, writer, hyperrealistic",5.0, 0.9,4096,0.99],
289
- ["A pig dressed as a mason, by Bill Gekas",5.0, 0.9,4096,0.99],
290
- ],
291
  stop_btn="Stop Generation",
292
  additional_inputs = [guidance_scale, temperature, top_K, top_P],
293
  additional_inputs_accordion="⚙️ Advanced Settings",
@@ -300,15 +289,13 @@ with gr.Blocks(fill_height=True) as demo:
300
  with gr.Tab("Image To Text"):
301
  bbb = gr.ChatInterface(
302
  fn=bot_streaming_I2T,
303
- examples=[ {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
304
  description="Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
305
  stop_btn="Stop Generation",
306
  multimodal=True,
307
  textbox=chat_input_I2T,
308
  chatbot=chatbot_I2T,
309
  )
310
-
311
-
312
 
313
  demo.queue(api_open=False)
314
  demo.launch(allowed_paths=["./"],server_port=2560, share=False )
 
1
  import time
 
 
 
2
  import torch
3
+ import gradio as gr
4
  from PIL import Image
 
 
 
5
  from tqdm import tqdm
6
+ from threading import Thread
7
+ from torchvision import transforms
8
+ from huggingface_hub import hf_hub_download
9
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
10
 
 
 
 
11
  from model import *
12
  from unitok.config import Args
13
  from unitok.model import UniTok
14
+ from conversation import conv_templates
15
+ from mm_utils import tokenizer_image_token
16
+ from helpers import sample, expand2square
 
17
 
18
+ import os
19
+ os.system("pip uninstall -y gradio")
20
+ os.system("pip install gradio==4.44.1")
21
+ os.system("pip install gradio_client==1.3.0")
22
 
23
+ PILtransform = transforms.ToPILImage()
 
 
 
 
24
 
25
 
26
  IMAGE_TOKEN_INDEX=-200
 
40
 
41
  title_html = """
42
  <div style="display: flex; flex-direction: column; align-items: center; gap: 10px;">
43
+ <h1 style="margin: 0; line-height: 1; text-align: center;">UniTok: A Unified Tokenizer for Visual Generation and Understanding</h1>
44
  </div>
45
  """
46
 
47
  links_html = f"""
48
+ <center><font size=3><a href='https://foundationvision.github.io/UniTok/'>UniTok</a> has been open-sourced on <a href='https://huggingface.co/FoundationVision/unitok_mllm'>😊 Huggingface</a> and <a href='https://github.com/FoundationVision/UniTok'>🌟 GitHub</a>. If you find UniTok useful, a like❤️ or a star🌟 would be appreciated.</font></center>
49
  """
50
 
51
  introduction = f"""
52
+ This is a native MLLM built with UniTok, a unified visual tokenizer well-suited for both generation and understanding tasks.
53
+ More details can be found on the project <a href='https://foundationvision.github.io/UniTok/'> homepage</a> and in the <a href='https://arxiv.org/abs/2502.20321'> paper</a>. """
54
 
55
+ unitok_ckpt = hf_hub_download(repo_id='FoundationVision/unitok_tokenizer', filename='unitok_tokenizer.pth')
56
+ ckpt = torch.load(unitok_ckpt, map_location='cpu')
57
  vae_cfg = Args()
58
  vae_cfg.load_state_dict(ckpt['args'])
59
  vq_model = UniTok(vae_cfg)
 
61
  vq_model.to('cuda')
62
  vq_model.eval()
63
 
64
+ mllm_ckpt = 'FoundationVision/unitok_mllm'
65
+ tokenizer = AutoTokenizer.from_pretrained(mllm_ckpt, padding_side='left')
66
+ vqllm = MiniGeminiLlamaForCausalLM.from_pretrained(mllm_ckpt).cuda()
67
+ vqllm = vqllm.to(dtype=torch.bfloat16)
68
+ vqllm = vqllm.eval()
69
 
 
 
70
  num_codebooks = vae_cfg.num_codebooks
71
 
72
  # @spaces.GPU
 
143
  yield generated_text
144
 
145
 
 
146
  def show_gallery(images):
147
  gallery = gr.Gallery(images, label="Gallery", columns=4, height="auto",preview=True,scale=0.05) # set a two-row, two-column layout
148
  return gallery
149
 
150
  # @spaces.GPU
151
  def bot_streaming_T2I(message, history,guidance_scale, temperature, top_K, top_P):
 
152
  global stop_flag
153
  stop_flag = True
154
  time.sleep(0.2)
 
167
  model_inputs = tokenizer(text_inputs + uncondition_text_inputs, return_tensors="pt", padding=True).to('cuda')
168
  else:
169
  model_inputs = tokenizer(text_inputs, return_tensors="pt", padding=True).to('cuda')
170
+ model_kwargs = {'attention_mask':model_inputs.pop('attention_mask'), 'use_cache': True}
 
 
171
  input_ids = model_inputs.pop('input_ids')
172
  batch_size, cur_len = input_ids.shape
173
  if "inputs_embeds" in model_kwargs:
 
229
  del model_kwargs
230
  # image_vq_id = input_ids[:,prompt_length:prompt_length+256]-ori_vocabe_size
231
  image_vq_id = torch.stack(pred_tokens, dim=-1)[:ori_batchsize]
 
232
 
233
  generated_image_list = []
234
  rec_images = vq_model.idx_to_img(image_vq_id)
 
273
  ["Portrait of an asian woman. She has pink violet hair style with modern complex hairdressing. The background is dark with cyberpunk neon lights. Inspired by Cyberpunk 2077 and Blade Runner. Ultra realistic picture. To capture the image, you will use a fullframe DSLR or mirrorless camera with a highresolution sensor, an aperture of f2.8 or wider, and a shutter speed of 1500 second or faster. You will use natural light and reflectors to create a balanced and welllit image, and will experiment with different angles and compositions to create the most i",5.0, 0.9,4096,0.99],
274
  ["female character fantasy world, for fantasy story, protagonist, interesting and detailed clothes, beautiful, medieval fantasy cinematic shot photo taken by canon, photo taken by fuji, photo taken by kodak incredibly detailed, sharpen, details professional lighting , film lighting 350mm lightroom cinematography, hyper realism, cinematic, film quality",5.0, 0.9,4096,0.99],
275
  ["strawberries splashing, swirling liquid, realism, octane render, raytracing",5.0, 0.9,4096,0.99],
276
+ ["hedgehog face, floating in space, wearing space suit no helmet, cinematic, 50mm f1.8, unreal engine 5",5.0, 0.9,4096,0.99],
277
+ ["artificial intelligence, revolution, publishing, writer, hyperrealistic",5.0, 0.9,4096,0.99],
278
+ ["A pig dressed as a mason, by Bill Gekas",5.0, 0.9,4096,0.99],
279
+ ],
280
  stop_btn="Stop Generation",
281
  additional_inputs = [guidance_scale, temperature, top_K, top_P],
282
  additional_inputs_accordion="⚙️ Advanced Settings",
 
289
  with gr.Tab("Image To Text"):
290
  bbb = gr.ChatInterface(
291
  fn=bot_streaming_I2T,
292
+ examples=[{"text": "How to make this pastry?", "files": ["./baklava.png"]}],
293
  description="Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
294
  stop_btn="Stop Generation",
295
  multimodal=True,
296
  textbox=chat_input_I2T,
297
  chatbot=chatbot_I2T,
298
  )
 
 
299
 
300
  demo.queue(api_open=False)
301
  demo.launch(allowed_paths=["./"],server_port=2560, share=False )
i2t.py DELETED
@@ -1,192 +0,0 @@
1
- import os
2
- import sys
3
- import json
4
- import math
5
- import torch
6
- import argparse
7
- import shortuuid
8
- from tqdm import tqdm
9
- from PIL import Image
10
- from PIL import ImageFile
11
- from torchvision import transforms
12
-
13
- from constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
14
- from conversation import conv_templates, SeparatorStyle
15
- from model.builder import load_pretrained_model
16
- from tools import disable_torch_init
17
- from mm_utils import tokenizer_image_token, get_model_name_from_path
18
- from torch.utils.data import Dataset, DataLoader
19
-
20
-
21
- from unitok.config import Args
22
- from unitok.model import UniTok
23
-
24
- ImageFile.LOAD_TRUNCATED_IMAGES = False
25
- torch.set_grad_enabled(False)
26
-
27
-
28
- def split_list(lst, n):
29
- """Split a list into n (roughly) equal-sized chunks"""
30
- chunk_size = math.ceil(len(lst) / n) # integer division
31
- return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
32
-
33
-
34
- def get_chunk(lst, n, k):
35
- chunks = split_list(lst, n)
36
- return chunks[k]
37
-
38
-
39
- # Custom dataset class
40
- class CustomDataset(Dataset):
41
- def __init__(self, questions, image_folder, tokenizer, image_processor, model_config):
42
- self.questions = questions
43
- self.image_folder = image_folder
44
- self.tokenizer = tokenizer
45
- self.image_processor = image_processor
46
- self.model_config = model_config
47
-
48
- def __getitem__(self, index):
49
- line = self.questions[index]
50
- image_file = line["image"]
51
- qs = line["text"]
52
-
53
- qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
54
-
55
- conv = conv_templates[args.conv_mode].copy()
56
- conv.append_message(conv.roles[0], qs)
57
- conv.append_message(conv.roles[1], None)
58
- prompt = conv.get_prompt()
59
- # prompt = prompt.replace('<image>','<boi><image><eoi>')
60
- # import pdb;pdb.set_trace()
61
- image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB')
62
- # import pdb;pdb.set_trace()
63
- pad_image = expand2square(image, (122, 116, 104) )
64
- # import pdb;pdb.set_trace()
65
- img = self.image_processor[0](pad_image).unsqueeze(0)
66
- img = img.to('cuda')
67
- # import pdb;pdb.set_trace()
68
- with torch.no_grad():
69
- vq_code = self.image_processor[1].img_to_idx(img)
70
- vqcode = vq_code.cpu()
71
-
72
- input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
73
-
74
-
75
- return input_ids,vqcode,os.path.join(self.image_folder, image_file) #, image_tensor, image_tensor_aux
76
-
77
- def __len__(self):
78
- return len(self.questions)
79
-
80
-
81
- # DataLoader
82
- def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=0):
83
- assert batch_size == 1, "batch_size must be 1"
84
- dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config)
85
- data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
86
- return data_loader
87
-
88
- def expand2square(pil_img, background_color):
89
- width, height = pil_img.size
90
- if width == height:
91
- return pil_img
92
- elif width > height:
93
- result = Image.new(pil_img.mode, (width, width), background_color)
94
- result.paste(pil_img, (0, (width - height) // 2))
95
- return result
96
- else:
97
- result = Image.new(pil_img.mode, (height, height), background_color)
98
- result.paste(pil_img, ((height - width) // 2, 0))
99
- return result
100
-
101
- def eval_model(args):
102
- # Model
103
- disable_torch_init()
104
- model_path = os.path.expanduser(args.model_path)
105
- model_name = get_model_name_from_path(model_path)
106
- tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, load_8bit=args.load_8bit)
107
-
108
- ckpt = torch.load(args.tokenizer_path, map_location='cpu')
109
- vae_cfg = Args()
110
- vae_cfg.load_state_dict(ckpt['args'])
111
- vq_model = UniTok(vae_cfg)
112
- vq_model.load_state_dict(ckpt['trainer']['unitok'])
113
- vq_model.to('cuda')
114
- vq_model.eval()
115
- del ckpt
116
-
117
- crop_size = 256
118
- transform = transforms.Compose([
119
- transforms.Resize((crop_size, crop_size)),
120
- transforms.ToTensor(),
121
- transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
122
- ])
123
- image_processor = (transform, vq_model)
124
-
125
- questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
126
- questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
127
- answers_file = os.path.expanduser(args.answers_file)
128
- os.makedirs(os.path.dirname(answers_file), exist_ok=True)
129
- ans_file = open(answers_file, "w")
130
-
131
- if 'plain' in args.conv_mode and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
132
- args.conv_mode = args.conv_mode + '_mmtag'
133
- print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')
134
-
135
- data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config)
136
-
137
- for (input_ids, image_codes,imagepath), line in tqdm(zip(data_loader, questions), total=len(questions)):
138
- idx = line["question_id"]
139
- cur_prompt = line["text"]
140
-
141
- input_ids = input_ids.to(device=model.device, non_blocking=True)
142
- image_codes = image_codes.to(device=model.device, non_blocking=True)
143
- if hasattr(model, "update_prompt"):
144
- model.update_prompt([[cur_prompt]])
145
-
146
- with torch.inference_mode():
147
- output_ids = model.generate_mllm(
148
- input_ids,
149
- images=image_codes,
150
- images_aux= None,
151
- do_sample=True if args.temperature > 0 else False,
152
- temperature=args.temperature,
153
- top_p=args.top_p,
154
- num_beams=args.num_beams,
155
- max_new_tokens=args.max_new_tokens,
156
- bos_token_id=tokenizer.bos_token_id, # Begin of sequence token
157
- eos_token_id=tokenizer.eos_token_id, # End of sequence token
158
- pad_token_id=tokenizer.pad_token_id, # Pad token
159
- use_cache=False
160
- )
161
-
162
- outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
163
- ans_id = shortuuid.uuid()
164
- ans_file.write(json.dumps({
165
- "question_id": idx,
166
- "prompt": cur_prompt,
167
- "text": outputs,
168
- "answer_id": ans_id,
169
- "model_id": model_name,
170
- "metadata": {}
171
- }) + "\n")
172
- ans_file.close()
173
-
174
- if __name__ == "__main__":
175
- parser = argparse.ArgumentParser()
176
- parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
177
- parser.add_argument("--tokenizer-path", type=str, required=True)
178
- parser.add_argument("--model-base", type=str, default=None)
179
- parser.add_argument("--image-folder", type=str, default="")
180
- parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
181
- parser.add_argument("--answers-file", type=str, default="answer.jsonl")
182
- parser.add_argument("--conv-mode", type=str, default="llava_v1")
183
- parser.add_argument("--num-chunks", type=int, default=1)
184
- parser.add_argument("--chunk-idx", type=int, default=0)
185
- parser.add_argument("--temperature", type=float, default=0.2)
186
- parser.add_argument("--top_p", type=float, default=None)
187
- parser.add_argument("--num_beams", type=int, default=1)
188
- parser.add_argument('--load_8bit', type=bool, default=False)
189
- parser.add_argument("--max_new_tokens", type=int, default=128)
190
- args = parser.parse_args()
191
-
192
- eval_model(args)
 
 
 
 
inference_i2t.py DELETED
@@ -1,110 +0,0 @@
1
- import torch
2
- import argparse
3
- import PIL
4
- from PIL import Image
5
- import os
6
- from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
7
- from conversation import conv_templates, SeparatorStyle
8
- from torchvision import transforms
9
-
10
- from constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
11
-
12
- from threading import Thread
13
- from unitok.config import Args
14
- from unitok.model import UniTok
15
-
16
- from model.builder import load_pretrained_model
17
- from mm_utils import tokenizer_image_token, get_model_name_from_path
18
-
19
-
20
- IMAGE_TOKEN_INDEX=-200
21
-
22
- def expand2square(pil_img, background_color):
23
- width, height = pil_img.size
24
- if width == height:
25
- return pil_img
26
- elif width > height:
27
- result = Image.new(pil_img.mode, (width, width), background_color)
28
- result.paste(pil_img, (0, (width - height) // 2))
29
- return result
30
- else:
31
- result = Image.new(pil_img.mode, (height, height), background_color)
32
- result.paste(pil_img, ((height - width) // 2, 0))
33
- return result
34
-
35
-
36
- def main(args):
37
-
38
- ckpt = torch.load(args.unitok_path, map_location='cpu')
39
- vae_cfg = Args()
40
- vae_cfg.load_state_dict(ckpt['args'])
41
- vq_model = UniTok(vae_cfg)
42
- vq_model.load_state_dict(ckpt['trainer']['unitok'])
43
- vq_model.to('cuda')
44
- vq_model.eval()
45
-
46
- model_path = os.path.expanduser(args.mllm_path)
47
- model_name = get_model_name_from_path(model_path)
48
- tokenizer, vqllm, image_processor, context_len = load_pretrained_model(model_path, model_name, load_8bit=args.load_8bit)
49
-
50
- qs = args.prompt
51
- qs = '<boi><image><eoi>' + '\n' + qs
52
- conv = conv_templates['llava_v1'].copy()
53
- conv.append_message(conv.roles[0], qs)
54
- conv.append_message(conv.roles[1], None)
55
- prompt = conv.get_prompt()
56
-
57
- crop_size = 256
58
- transform = transforms.Compose([
59
- transforms.Resize((crop_size, crop_size)),
60
- transforms.ToTensor(),
61
- transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
62
- ])
63
-
64
- print(prompt)
65
- image = Image.open(args.image_path).convert('RGB')
66
- pad_image = expand2square(image, (122, 116, 104) )
67
- # import pdb;pdb.set_trace()
68
- img = transform(pad_image).unsqueeze(0)
69
- img = img.to('cuda')
70
- # import pdb;pdb.set_trace()
71
- with torch.no_grad():
72
- vq_code = vq_model.img_to_idx(img)
73
- image_codes = vq_code.unsqueeze(0)
74
-
75
- input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
76
-
77
- # input_ids = torch.cat(text_ids, dim=0)
78
- # input_embeddings = vqllm.embed_tokens(input_ids)
79
- inputs = {
80
- "inputs":input_ids.unsqueeze(0).to("cuda:0"),
81
- "images":image_codes.to("cuda:0"),
82
- "max_new_tokens":1024,
83
- "bos_token_id":tokenizer.bos_token_id, # Begin of sequence token
84
- "eos_token_id":tokenizer.eos_token_id, # End of sequence token
85
- "pad_token_id":tokenizer.pad_token_id, # Pad token
86
- }
87
- streamer = TextIteratorStreamer(tokenizer, **{"skip_special_tokens": True, "skip_prompt": True})
88
-
89
- # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
90
- generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
91
- thread = Thread(target=vqllm.generate_mllm, kwargs=generation_kwargs)
92
- thread.start()
93
- generated_text = ""
94
- for new_text in streamer:
95
- generated_text += new_text
96
- print(generated_text)
97
-
98
-
99
- if __name__ == '__main__':
100
- parser = argparse.ArgumentParser(description='Process some integers.')
101
- parser.add_argument('--unitok_path', type=str, default=r'D:\projects\liquid_app\UniTok\UniTok_weights\unitok_tokenizer\unitok_tokenizer.pth',required=False)
102
- parser.add_argument('--mllm_path', type=str, default= r'C:\debug_ckpts\unitok_mllm', required=False)
103
- parser.add_argument('--prompt', type=str, required=True, help='input text prompt')
104
- parser.add_argument('--image_path', type=str, required=True, help='input image path')
105
- parser.add_argument('--load_8bit', action='store_true', default=False, help='use 8bit to save memory')
106
-
107
- args = parser.parse_args()
108
- main(args)
109
-
110
-
 
 
 
 
t2i.py DELETED
@@ -1,224 +0,0 @@
1
- import os
2
- import sys
3
- import torch
4
- import argparse
5
- import numpy as np
6
- from tqdm import tqdm
7
- from torchvision import transforms
8
- from torch.nn import functional as F
9
- from transformers import AutoTokenizer, AutoModelForCausalLM
10
-
11
- from model import *
12
- from unitok.config import Args
13
- from unitok.model import UniTok
14
-
15
-
16
- PILtransform = transforms.ToPILImage()
17
-
18
-
19
- def top_k_top_p_filtering(
20
- logits,
21
- top_k: int = 0,
22
- top_p: float = 1.0,
23
- filter_value: float = -float("Inf"),
24
- min_tokens_to_keep: int = 1,
25
- ):
26
- """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
27
- Args:
28
- logits: logits distribution shape (batch size, vocabulary size)
29
- if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
30
- if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
31
- Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
32
- Make sure we keep at least min_tokens_to_keep per batch example in the output
33
- From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
34
- """
35
-
36
- if top_k > 0:
37
- top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
38
- # Remove all tokens with a probability less than the last token of the top-k
39
-
40
- indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
41
- logits[indices_to_remove] = filter_value
42
-
43
- if top_p < 1.0:
44
- sorted_logits, sorted_indices = torch.sort(logits, descending=True)
45
- cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
46
-
47
- # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
48
- sorted_indices_to_remove = cumulative_probs > top_p
49
- if min_tokens_to_keep > 1:
50
- # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
51
- sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
52
- # Shift the indices to the right to keep also the first token above the threshold
53
- sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
54
- sorted_indices_to_remove[..., 0] = 0
55
-
56
- # scatter sorted tensors to original indexing
57
- indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
58
- logits[indices_to_remove] = filter_value
59
- # import pdb;pdb.set_trace()
60
- return logits
61
-
62
-
63
- def sample(logits, temperature: float = 1.0, top_k: int = 0, top_p: float = 1.0, sample_logits=True):
64
- logits = logits[:, -1, :] / max(temperature, 1e-5)
65
- if top_k > 0 or top_p < 1.0:
66
- logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
67
- probs = F.softmax(logits, dim=-1)
68
- if sample_logits:
69
- idx = torch.multinomial(probs, num_samples=1)
70
- else:
71
- _, idx = torch.topk(probs, k=1, dim=-1)
72
- return idx, probs
73
-
74
-
75
- def split_list(input_list, chunk_size):
76
- return [input_list[i:i + chunk_size] for i in range(0, len(input_list), chunk_size)]
77
-
78
-
79
- def get_args_parser():
80
- parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
81
- parser.add_argument('--unitok_path', type=str, required=True)
82
- parser.add_argument('--mllm_path', type=str, required=True)
83
- parser.add_argument('--prompt_file', type=str, required=True)
84
- parser.add_argument('--result_dir', type=str, required=True)
85
- parser.add_argument('--idx', type=int, default=0)
86
- parser.add_argument('--tau', type=float, default=0.9)
87
- parser.add_argument('--topk', type=int, default=2048)
88
- parser.add_argument('--topp', type=float, default=0.96)
89
- parser.add_argument('--cfg_scale', type=float, default=5.0)
90
- return parser
91
-
92
-
93
- def main(args):
94
- text_set_id = args.idx
95
- tau = args.tau
96
- topk = args.topk
97
- topp = args.topp
98
- cfg_scale = args.cfg_scale
99
-
100
- print('loading vq model ...')
101
- ckpt = torch.load(args.unitok_path, map_location='cpu')
102
- vae_cfg = Args()
103
- vae_cfg.load_state_dict(ckpt['args'])
104
- vq_model = UniTok(vae_cfg)
105
- vq_model.load_state_dict(ckpt['trainer']['unitok'])
106
- vq_model.to('cuda')
107
- vq_model.eval()
108
-
109
- image_save_pth = '{}/GenAI-cfg_{}-topk_{}-topp_{}-tau_{}'.format(args.result_dir, str(cfg_scale), str(topk), str(topp), str(tau))
110
-
111
- tokenizer = AutoTokenizer.from_pretrained(args.mllm_path, padding_side='left')
112
- vqllm = AutoModelForCausalLM.from_pretrained(
113
- args.mllm_path,
114
- attn_implementation='flash_attention_2',
115
- torch_dtype=torch.bfloat16
116
- ).to('cuda')
117
-
118
- num_processes = 8
119
- chunk_size = 8 # batchsize
120
- num_codebooks = vae_cfg.num_codebooks
121
-
122
- with open(args.prompt_file, 'r') as f:
123
- lines = f.readlines()
124
- all_prompts = []
125
- for index, line in enumerate(lines):
126
- all_prompts.append({'Index': str(index + 1).zfill(5), 'Prompt': line.strip()})
127
-
128
- chunked_filenames = np.array_split(all_prompts, num_processes)
129
- subset = chunked_filenames[text_set_id].tolist()
130
- chunk_inputs = split_list(subset, chunk_size)
131
- for chunk in tqdm(chunk_inputs):
132
- text_inputs = [v['Prompt'] for v in chunk]
133
- uncondition_text_inputs = ['<unconditional>'] * len(text_inputs)
134
- for i in range(len(text_inputs)):
135
- text_inputs[i] = text_inputs[i] + ' Generate an image based on this description.'
136
- ori_batchsize = len(text_inputs)
137
-
138
- save_list = []
139
- if cfg_scale > 1:
140
- model_inputs = tokenizer(text_inputs + uncondition_text_inputs, return_tensors="pt", padding=True).to('cuda')
141
- total_batchsize = len(text_inputs + uncondition_text_inputs)
142
- model_inputs['input_ids'] = torch.cat([
143
- model_inputs['input_ids'],
144
- torch.empty(total_batchsize, 1).fill_(3).to(model_inputs['input_ids'])
145
- ], dim=1)
146
- model_inputs['attention_mask'] = torch.cat([
147
- model_inputs['attention_mask'],
148
- torch.empty(total_batchsize, 1).fill_(1).to(model_inputs['attention_mask'])
149
- ], dim=1)
150
- else:
151
- model_inputs = tokenizer(text_inputs, return_tensors="pt", padding=True).to('cuda')
152
- total_batchsize = len(text_inputs)
153
- model_inputs['input_ids'] = torch.cat([
154
- model_inputs['input_ids'],
155
- torch.empty(total_batchsize, 1).fill_(3).to(model_inputs['input_ids'])
156
- ], dim=1)
157
- model_inputs['attention_mask'] = torch.cat([
158
- model_inputs['attention_mask'],
159
- torch.empty(total_batchsize, 1).fill_(1).to(model_inputs['attention_mask'])
160
- ], dim=1)
161
- with torch.no_grad():
162
- sampling_kwargs = {'temperature': tau, 'top_k': topk, 'top_p': topp, 'sample_logits': True}
163
- pred_tokens = []
164
- input_multi_ids = None
165
- for _ in range(256):
166
- outputs = vqllm.T2I_forward_nocache(
167
- **model_inputs,
168
- input_multi_ids=input_multi_ids,
169
- use_cache=None,
170
- return_dict=True,
171
- output_attentions=False,
172
- output_hidden_states=False,
173
- )
174
- next_embed = outputs['last_hidden_state'][:, -1:, :]
175
-
176
- indices_arhead = []
177
- for i_head in range(num_codebooks):
178
- ar_next_embed = vqllm.ar_head(
179
- inputs_embeds=next_embed,
180
- use_cache=False,
181
- output_attentions=False,
182
- output_hidden_states=False,
183
- return_dict=False,
184
- )
185
- next_token_logits = vqllm.ar_head.linear_head(ar_next_embed)
186
- if cfg_scale > 1:
187
- cond_logits, uncond_logits = torch.split(next_token_logits, len(next_token_logits) // 2, dim=0)
188
- cfg_logits = uncond_logits + (cond_logits - uncond_logits) * cfg_scale
189
- half_next_token, _ = sample(cfg_logits, **sampling_kwargs)
190
- next_token = torch.cat([half_next_token, half_next_token]) # [bz,1]
191
- else:
192
- next_token, next_prob = sample(next_token_logits, **sampling_kwargs)
193
-
194
- indices_arhead.append(next_token)
195
- if i_head < num_codebooks - 1:
196
- predicted_embed = vqllm.ar_head.codebooks[i_head](next_token)
197
- next_embed = torch.cat([next_embed, predicted_embed], dim=1)
198
-
199
- # update generated ids, model inputs, and length for next step
200
- pred_tokens.append(torch.cat(indices_arhead, dim=1)) # [numcodebook,bz*2]
201
- input_multi_ids = torch.stack(pred_tokens, dim=-1)
202
-
203
- del sampling_kwargs, model_inputs, outputs
204
-
205
- image_vq_id = torch.stack(pred_tokens, dim=-1)[:ori_batchsize]
206
- save_list.append(image_vq_id)
207
-
208
- torch.cuda.empty_cache()
209
-
210
- print('decoding images ...')
211
- if not os.path.exists(image_save_pth):
212
- os.makedirs(image_save_pth)
213
- for datainfo, vq_code in zip(chunk, save_list[0]):
214
- idx = datainfo['Index']
215
- new_gen_ids = vq_code.unsqueeze(0).to('cuda')
216
- rec_image = vq_model.idx_to_img(new_gen_ids)
217
- rec_img = PILtransform(rec_image.squeeze(0).add(1).mul_(0.5).clamp_(0, 1))
218
- rec_img.save('{}/{}.jpg'.format(image_save_pth, str(idx)))
219
-
220
-
221
- if __name__ == '__main__':
222
- parser = argparse.ArgumentParser('genai inference script', parents=[get_args_parser()])
223
- args = parser.parse_args()
224
- main(args)
 
 
 
 
t2i_new.py DELETED
@@ -1,243 +0,0 @@
1
- import os
2
- import sys
3
- import torch
4
- import argparse
5
- import numpy as np
6
- from tqdm import tqdm
7
- from torchvision import transforms
8
- from torch.nn import functional as F
9
- from transformers import AutoTokenizer, AutoModelForCausalLM
10
-
11
- from model import *
12
- from unitok.config import Args
13
- from unitok.model import UniTok
14
-
15
-
16
- PILtransform = transforms.ToPILImage()
17
-
18
-
19
- def top_k_top_p_filtering(
20
- logits,
21
- top_k: int = 0,
22
- top_p: float = 1.0,
23
- filter_value: float = -float("Inf"),
24
- min_tokens_to_keep: int = 1,
25
- ):
26
- """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
27
- Args:
28
- logits: logits distribution shape (batch size, vocabulary size)
29
- if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
30
- if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
31
- Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
32
- Make sure we keep at least min_tokens_to_keep per batch example in the output
33
- From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
34
- """
35
-
36
- if top_k > 0:
37
- top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
38
- # Remove all tokens with a probability less than the last token of the top-k
39
-
40
- indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
41
- logits[indices_to_remove] = filter_value
42
-
43
- if top_p < 1.0:
44
- sorted_logits, sorted_indices = torch.sort(logits, descending=True)
45
- cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
46
-
47
- # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
48
- sorted_indices_to_remove = cumulative_probs > top_p
49
- if min_tokens_to_keep > 1:
50
- # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
51
- sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
52
- # Shift the indices to the right to keep also the first token above the threshold
53
- sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
54
- sorted_indices_to_remove[..., 0] = 0
55
-
56
- # scatter sorted tensors to original indexing
57
- indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
58
- logits[indices_to_remove] = filter_value
59
- # import pdb;pdb.set_trace()
60
- return logits
61
-
62
-
63
- def sample(logits, temperature: float = 1.0, top_k: int = 0, top_p: float = 1.0, sample_logits=True):
64
- logits = logits[:, -1, :] / max(temperature, 1e-5)
65
- if top_k > 0 or top_p < 1.0:
66
- logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
67
- probs = F.softmax(logits, dim=-1)
68
- if sample_logits:
69
- idx = torch.multinomial(probs, num_samples=1)
70
- else:
71
- _, idx = torch.topk(probs, k=1, dim=-1)
72
- return idx, probs
73
-
74
-
75
- def split_list(input_list, chunk_size):
76
- return [input_list[i:i + chunk_size] for i in range(0, len(input_list), chunk_size)]
77
-
78
-
79
- def get_args_parser():
80
- parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
81
- parser.add_argument('--unitok_path', type=str, default=r'D:\projects\liquid_app\UniTok\UniTok_weights\unitok_tokenizer\unitok_tokenizer.pth',required=False)
82
- parser.add_argument('--mllm_path', type=str, default= r'C:\debug_ckpts\unitok_mllm', required=False)
83
- # parser.add_argument('--prompt_file', type=str, required=True)
84
- # parser.add_argument('--result_dir', type=str, required=True)
85
- parser.add_argument('--idx', type=int, default=0)
86
- parser.add_argument('--tau', type=float, default=0.9)
87
- parser.add_argument('--topk', type=int, default=2048)
88
- parser.add_argument('--topp', type=float, default=0.96)
89
- parser.add_argument('--cfg_scale', type=float, default=5.0)
90
- return parser
91
-
92
-
93
- def main(args):
94
- text_set_id = args.idx
95
- tau = args.tau
96
- topk = args.topk
97
- topp = args.topp
98
- cfg_scale = args.cfg_scale
99
-
100
- print('loading vq model ...')
101
- ckpt = torch.load(args.unitok_path, map_location='cpu')
102
- vae_cfg = Args()
103
- vae_cfg.load_state_dict(ckpt['args'])
104
- vq_model = UniTok(vae_cfg)
105
- vq_model.load_state_dict(ckpt['trainer']['unitok'])
106
- vq_model.to('cuda')
107
- vq_model.eval()
108
-
109
-
110
- tokenizer = AutoTokenizer.from_pretrained(args.mllm_path, padding_side='left')
111
- vqllm = MiniGeminiLlamaForCausalLM.from_pretrained(
112
- args.mllm_path,
113
- attn_implementation='flash_attention_2',
114
- torch_dtype=torch.bfloat16
115
- ).to('cuda')
116
- num_codebooks = vae_cfg.num_codebooks
117
- # import pdb;pdb.set_trace()
118
- chunk_inputs = [[{'Prompt':'a dog in grasee'},{'Prompt':'a dog in grasee'},{'Prompt':'a dog in grasee'},{'Prompt':'a dog in grasee'}]]
119
- for chunk in tqdm(chunk_inputs):
120
- text_inputs = [v['Prompt'] for v in chunk]
121
- uncondition_text_inputs = ['<unconditional>\x00'] * len(text_inputs)
122
- for i in range(len(text_inputs)):
123
- text_inputs[i] = text_inputs[i] + ' Generate an image based on this description.\x00'
124
- ori_batchsize = len(text_inputs)
125
-
126
- save_list = []
127
- if cfg_scale > 1:
128
- model_inputs = tokenizer(text_inputs + uncondition_text_inputs, return_tensors="pt", padding=True).to('cuda')
129
- else:
130
- model_inputs = tokenizer(text_inputs, return_tensors="pt", padding=True).to('cuda')
131
-
132
-
133
- model_kwargs = {'attention_mask':model_inputs.pop('attention_mask'),
134
- 'use_cache': True
135
- }
136
- input_ids = model_inputs.pop('input_ids')
137
- batch_size, cur_len = input_ids.shape
138
- if "inputs_embeds" in model_kwargs:
139
- cur_len = model_kwargs["inputs_embeds"].shape[1]
140
- model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
141
- # import pdb;pdb.set_trace()
142
-
143
-
144
- with torch.no_grad():
145
- sampling_kwargs = {'temperature': tau, 'top_k': topk, 'top_p': topp, 'sample_logits': True}
146
-
147
- pred_tokens = []
148
- input_multi_ids = None
149
- for i in tqdm(range(256)):
150
- model_inputs = vqllm.prepare_inputs_for_generation(input_ids, **model_kwargs) #model_inputs['input_ids'], model_inputs['position_ids'], model_inputs['cache_position']
151
- # import pdb;pdb.set_trace() #model_inputs['position_ids'] , model_inputs['cache_position']
152
- outputs = vqllm.T2I_forward_withcache(
153
- **model_inputs,
154
- input_multi_ids=input_multi_ids,
155
- return_dict=True,
156
- output_attentions=False,
157
- output_hidden_states=False,
158
- )
159
-
160
- # import pdb;pdb.set_trace()
161
- next_embed = outputs['last_hidden_state'][:, -1:, :]
162
- # next_token_logits = outputs.logits[:, -1:, :]
163
- indices_arhead = []
164
-
165
- # for i_head in range(num_codebooks):
166
- # ar_next_embed = vqllm.ar_head(inputs_embeds=next_embed,use_cache=False,output_attentions=False, output_hidden_states=False, return_dict=False,)
167
- # next_token_logits = vqllm.ar_head.linear_head(ar_next_embed[:,-1:,:])
168
- # # import pdb;pdb.set_trace()
169
- # image_probs = F.softmax(next_token_logits, dim=-1)
170
- # _, image_idx = torch.topk(image_probs, k=1, dim=-1) #[numcodebook,256,1]
171
- # next_token = image_idx[:,:,0]
172
- # indices_arhead.append(next_token)
173
- # # import pdb;pdb.set_trace()
174
- # # pred_tokens.append(next_token)
175
- # if i_head<num_codebooks-1:
176
- # predicted_embed = vqllm.ar_head.codebooks[i_head](next_token)
177
- # next_embed = torch.cat([next_embed,predicted_embed],dim=1)
178
-
179
- for i_head in range(num_codebooks):
180
- ar_next_embed = vqllm.ar_head(
181
- inputs_embeds=next_embed,
182
- use_cache=False,
183
- output_attentions=False,
184
- output_hidden_states=False,
185
- return_dict=False,
186
- )
187
- next_token_logits = vqllm.ar_head.linear_head(ar_next_embed[0])
188
- if cfg_scale > 1:
189
- cond_logits, uncond_logits = torch.split(next_token_logits, len(next_token_logits) // 2, dim=0)
190
- cfg_logits = uncond_logits + (cond_logits - uncond_logits) * cfg_scale
191
- half_next_token, _ = sample(cfg_logits, **sampling_kwargs)
192
- # pred_tokens.append(half_next_token)
193
- next_token = torch.cat([half_next_token, half_next_token]) # [bz,1]
194
- else:
195
- next_token, next_prob = sample(next_token_logits, **sampling_kwargs)
196
- # pred_tokens.append(next_token)
197
- # import pdb;pdb.set_trace()
198
- indices_arhead.append(next_token)
199
- if i_head < num_codebooks - 1:
200
- predicted_embed = vqllm.ar_head.codebooks[i_head](next_token)
201
- next_embed = torch.cat([next_embed, predicted_embed], dim=1)
202
-
203
- # update generated ids, model inputs, and length for next step
204
- # import pdb;pdb.set_trace()
205
- pred_tokens.append(torch.cat(indices_arhead, dim=1)) # [numcodebook,bz*2]
206
- input_multi_ids = torch.stack(pred_tokens, dim=-1)
207
- # import pdb;pdb.set_trace()
208
- fake_id = torch.zeros_like(input_ids[:,:1])
209
- input_ids = torch.cat([input_ids, fake_id], dim=-1) # add fake id for cache
210
-
211
- model_kwargs = vqllm._update_model_kwargs_for_generation(
212
- outputs,
213
- model_kwargs,
214
- is_encoder_decoder=vqllm.config.is_encoder_decoder,
215
- )
216
- pass
217
- del sampling_kwargs
218
- del model_inputs
219
- del outputs
220
- del model_kwargs
221
- # image_vq_id = input_ids[:,prompt_length:prompt_length+256]-ori_vocabe_size
222
- image_vq_id = torch.stack(pred_tokens, dim=-1)[:ori_batchsize]
223
- # print(set(image_vq_id.tolist()))
224
- save_list.append(image_vq_id)
225
-
226
- torch.cuda.empty_cache()
227
-
228
- print('decoding images ...')
229
- image_save_pth = 'visualresults'
230
- if not os.path.exists(image_save_pth):
231
- os.makedirs(image_save_pth)
232
- for datainfo, vq_code in zip(chunk, save_list[0]):
233
- new_gen_ids = vq_code.unsqueeze(0).to('cuda')
234
- # import pdb;pdb.set_trace()
235
- rec_image = vq_model.idx_to_img(new_gen_ids)
236
- rec_img = PILtransform(rec_image.squeeze(0).add(1).mul_(0.5).clamp_(0, 1))
237
- rec_img.save('{}/{}.jpg'.format(image_save_pth, 'test'))
238
-
239
-
240
- if __name__ == '__main__':
241
- parser = argparse.ArgumentParser('genai inference script', parents=[get_args_parser()])
242
- args = parser.parse_args()
243
- main(args)