cuierfei committed on
Commit 2e21796 · verified · 1 Parent(s): 1dd2047

Upload folder using huggingface_hub

Files changed (2)
  1. README.md +7 -7
  2. modeling_intern_vit.py +6 -12
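The commit message indicates the files were pushed with huggingface_hub's folder upload. As a rough sketch of how such a push is typically done (the repo id and local path below are placeholders, not values recorded in this commit):

# Hedged sketch of a folder upload with huggingface_hub; repo_id and
# folder_path are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path='./InternVL-checkpoint',   # local directory with README.md, modeling files, weights
    repo_id='user/model-repo',             # placeholder target repository
    repo_type='model',
    commit_message='Upload folder using huggingface_hub',
)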
README.md CHANGED
@@ -154,7 +154,7 @@ model = AutoModel.from_pretrained(
     trust_remote_code=True).eval().cuda()
 tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 question = 'Hello, who are you?'
 response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
 print(f'User: {question}')
@@ -185,7 +185,7 @@ image_processor = CLIPImageProcessor.from_pretrained(path)
 image = Image.open('./examples/image2.jpg').resize((448, 448))
 pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch.bfloat16).cuda()
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 question = '<image>\nPlease describe the image shortly.'
 response = model.chat(tokenizer, pixel_values, question, generation_config)
 print(f'User: {question}')
@@ -211,7 +211,7 @@ image_processor = CLIPImageProcessor.from_pretrained(path)
 image = Image.open('./examples/image2.jpg').resize((448, 448))
 pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch.bfloat16).cuda()
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 question = '<image>\nPlease describe the image in detail.'
 response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
 print(f'User: {question}')
@@ -247,7 +247,7 @@ image2 = Image.open('./examples/image2.jpg').resize((448, 448))
 pixel_values2 = image_processor(images=image2, return_tensors='pt').pixel_values.to(torch.bfloat16).cuda()
 pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 question = '<image>\nDescribe the two images in detail.'
 response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                                history=None, return_history=True)
@@ -286,7 +286,7 @@ pixel_values2 = image_processor(images=image2, return_tensors='pt').pixel_values
 pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
 num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
 response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                                num_patches_list=num_patches_list, history=None, return_history=True)
@@ -323,7 +323,7 @@ pixel_values2 = image_processor(images=image2, return_tensors='pt').pixel_values
 pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
 num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
 responses = model.batch_chat(tokenizer, pixel_values,
                              num_patches_list=num_patches_list,
@@ -385,7 +385,7 @@ model = AutoModel.from_pretrained(
     trust_remote_code=True).eval().cuda()
 tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
 
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 
 video_path = './examples/red-panda.mp4'
 pixel_values, num_patches_list = load_video(video_path, num_segments=8)
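Every README example changes the same line: do_sample flips from False to True, switching the demos from greedy decoding to stochastic sampling while keeping max_new_tokens=1024. A minimal sketch of what the two dicts mean when passed to model.chat(); the temperature/top_p mention is an illustrative assumption, not part of this commit:

# Before this commit: greedy decoding, deterministic output for a given prompt.
generation_config = dict(max_new_tokens=1024, do_sample=False)

# After this commit: sampling, so repeated runs of the same prompt can differ.
# Extras such as temperature=0.7 or top_p=0.9 could be added here, but the
# README sets neither; they are shown only as illustrative assumptions.
generation_config = dict(max_new_tokens=1024, do_sample=True)

# The dict is passed straight through in every example, e.g.:
# response = model.chat(tokenizer, pixel_values, question, generation_config)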
modeling_intern_vit.py CHANGED
@@ -20,18 +20,12 @@ from transformers.utils import logging
 from .configuration_intern_vit import InternVisionConfig
 
 try:
-    try:  # v1
-        from flash_attn.flash_attn_interface import \
-            flash_attn_unpadded_qkvpacked_func
-    except:  # v2
-        from flash_attn.flash_attn_interface import \
-            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
-
     from flash_attn.bert_padding import pad_input, unpad_input
-
+    from flash_attn.flash_attn_interface import \
+        flash_attn_varlen_qkvpacked_func
     has_flash_attn = True
 except:
-    print('FlashAttention is not installed.')
+    print('FlashAttention2 is not installed.')
     has_flash_attn = False
 
 logger = logging.get_logger(__name__)
@@ -74,7 +68,7 @@ class FlashAttention(nn.Module):
                 max_s = seqlen
                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                           device=qkv.device)
-                output = flash_attn_unpadded_qkvpacked_func(
+                output = flash_attn_varlen_qkvpacked_func(
                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                     softmax_scale=self.softmax_scale, causal=causal
                 )
@@ -84,7 +78,7 @@ class FlashAttention(nn.Module):
                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
-                output_unpad = flash_attn_unpadded_qkvpacked_func(
+                output_unpad = flash_attn_varlen_qkvpacked_func(
                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                     softmax_scale=self.softmax_scale, causal=causal
                 )
@@ -93,7 +87,7 @@ class FlashAttention(nn.Module):
                                    'b s (h d) -> b s h d', h=nheads)
         else:
             assert max_s is not None
-            output = flash_attn_unpadded_qkvpacked_func(
+            output = flash_attn_varlen_qkvpacked_func(
                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                 softmax_scale=self.softmax_scale, causal=causal
             )
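In short, modeling_intern_vit.py drops the nested v1/v2 import of flash_attn_unpadded_qkvpacked_func and imports FlashAttention 2's flash_attn_varlen_qkvpacked_func directly, using it at the three attention call sites and adjusting the except-branch message. As a hedged sketch (not part of this commit), one way to check the environment before loading the model with trust_remote_code=True:

# Hedged sketch: verify a FlashAttention 2 build exposing the varlen qkvpacked
# kernel is importable, since the updated module no longer tries the v1 name.
try:
    import flash_attn
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func  # noqa: F401
    print(f'flash-attn {flash_attn.__version__} with flash_attn_varlen_qkvpacked_func found')
except ImportError:
    # In this case the remote module prints 'FlashAttention2 is not installed.'
    # and sets has_flash_attn = False, running without the flash kernels.
    print('FlashAttention 2 not available in this environment')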