czczup committed on
Commit
3cd6c6d
1 Parent(s): d0cf52b

fix compatibility issue for transformers 4.46+

Browse files
configuration_intern_vit.py CHANGED
@@ -3,6 +3,7 @@
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
 
6
  import os
7
  from typing import Union
8
 
 
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
6
+
7
  import os
8
  from typing import Union
9
 
configuration_internvl_chat.py CHANGED
@@ -47,12 +47,12 @@ class InternVLChatConfig(PretrainedConfig):
47
  logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
48
 
49
  self.vision_config = InternVisionConfig(**vision_config)
50
- if llm_config['architectures'][0] == 'LlamaForCausalLM':
51
  self.llm_config = LlamaConfig(**llm_config)
52
- elif llm_config['architectures'][0] == 'Phi3ForCausalLM':
53
  self.llm_config = Phi3Config(**llm_config)
54
  else:
55
- raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
56
  self.use_backbone_lora = use_backbone_lora
57
  self.use_llm_lora = use_llm_lora
58
  self.select_layer = select_layer
 
47
  logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
48
 
49
  self.vision_config = InternVisionConfig(**vision_config)
50
+ if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
51
  self.llm_config = LlamaConfig(**llm_config)
52
+ elif llm_config.get('architectures')[0] == 'Phi3ForCausalLM':
53
  self.llm_config = Phi3Config(**llm_config)
54
  else:
55
+ raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
56
  self.use_backbone_lora = use_backbone_lora
57
  self.use_llm_lora = use_llm_lora
58
  self.select_layer = select_layer
modeling_internvl_chat.py CHANGED
@@ -3,6 +3,7 @@
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
 
6
  import warnings
7
  from typing import Any, List, Optional, Tuple, Union
8
 
@@ -237,7 +238,7 @@ class InternVLChatModel(PreTrainedModel):
237
  model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
238
  input_ids = model_inputs['input_ids'].to(self.device)
239
  attention_mask = model_inputs['attention_mask'].to(self.device)
240
- eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
241
  generation_config['eos_token_id'] = eos_token_id
242
  generation_output = self.generate(
243
  pixel_values=pixel_values,
@@ -246,7 +247,7 @@ class InternVLChatModel(PreTrainedModel):
246
  **generation_config
247
  )
248
  responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
249
- responses = [response.split(template.sep)[0].strip() for response in responses]
250
  return responses
251
 
252
  def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
@@ -265,7 +266,7 @@ class InternVLChatModel(PreTrainedModel):
265
 
266
  template = get_conv_template(self.template)
267
  template.system_message = self.system_message
268
- eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
269
 
270
  history = [] if history is None else history
271
  for (old_question, old_answer) in history:
@@ -294,7 +295,7 @@ class InternVLChatModel(PreTrainedModel):
294
  **generation_config
295
  )
296
  response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
297
- response = response.split(template.sep)[0].strip()
298
  history.append((question, response))
299
  if return_history:
300
  return response, history
@@ -314,7 +315,6 @@ class InternVLChatModel(PreTrainedModel):
314
  visual_features: Optional[torch.FloatTensor] = None,
315
  generation_config: Optional[GenerationConfig] = None,
316
  output_hidden_states: Optional[bool] = None,
317
- return_dict: Optional[bool] = None,
318
  **generate_kwargs,
319
  ) -> torch.LongTensor:
320
 
@@ -342,7 +342,6 @@ class InternVLChatModel(PreTrainedModel):
342
  attention_mask=attention_mask,
343
  generation_config=generation_config,
344
  output_hidden_states=output_hidden_states,
345
- return_dict=return_dict,
346
  use_cache=True,
347
  **generate_kwargs,
348
  )
 
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
6
+
7
  import warnings
8
  from typing import Any, List, Optional, Tuple, Union
9
 
 
238
  model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
239
  input_ids = model_inputs['input_ids'].to(self.device)
240
  attention_mask = model_inputs['attention_mask'].to(self.device)
241
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
242
  generation_config['eos_token_id'] = eos_token_id
243
  generation_output = self.generate(
244
  pixel_values=pixel_values,
 
247
  **generation_config
248
  )
249
  responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
250
+ responses = [response.split(template.sep.strip())[0].strip() for response in responses]
251
  return responses
252
 
253
  def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
 
266
 
267
  template = get_conv_template(self.template)
268
  template.system_message = self.system_message
269
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
270
 
271
  history = [] if history is None else history
272
  for (old_question, old_answer) in history:
 
295
  **generation_config
296
  )
297
  response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
298
+ response = response.split(template.sep.strip())[0].strip()
299
  history.append((question, response))
300
  if return_history:
301
  return response, history
 
315
  visual_features: Optional[torch.FloatTensor] = None,
316
  generation_config: Optional[GenerationConfig] = None,
317
  output_hidden_states: Optional[bool] = None,
 
318
  **generate_kwargs,
319
  ) -> torch.LongTensor:
320
 
 
342
  attention_mask=attention_mask,
343
  generation_config=generation_config,
344
  output_hidden_states=output_hidden_states,
 
345
  use_cache=True,
346
  **generate_kwargs,
347
  )