tms-jr committed on
Commit
0a2cd81
·
verified ·
1 Parent(s): e571548

Remove torch.fx guard for pytorch < 1.13

Browse files

This model is blocking vLLM from upgrading to transformers 4.48.0 since `is_torch_greater_or_equal_than_1_13` is no longer available as of https://github.com/huggingface/transformers/pull/35358

Files changed (1) hide show
  1. modeling_minicpm.py +2 -3
modeling_minicpm.py CHANGED
@@ -38,7 +38,7 @@ from transformers.modeling_attn_mask_utils import (
38
  )
39
  from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
40
  from transformers.modeling_utils import PreTrainedModel
41
- from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
42
  from transformers.utils import (
43
  add_start_docstrings,
44
  add_start_docstrings_to_model_forward,
@@ -61,8 +61,7 @@ except:
61
  # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
62
  # It means that the function will not be traced through and simply appear as a node in the graph.
63
  if is_torch_fx_available():
64
- if not is_torch_greater_or_equal_than_1_13:
65
- import torch.fx
66
 
67
  _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
68
 
 
38
  )
39
  from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
40
  from transformers.modeling_utils import PreTrainedModel
41
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
42
  from transformers.utils import (
43
  add_start_docstrings,
44
  add_start_docstrings_to_model_forward,
 
61
  # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
62
  # It means that the function will not be traced through and simply appear as a node in the graph.
63
  if is_torch_fx_available():
64
+ import torch.fx
 
65
 
66
  _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
67