Andrei Panferov committed on
Commit 400eeaa
1 Parent(s): aebd979

try except import

Files changed (1):
  1. modeling_llama_aqlm.py +5 -2
modeling_llama_aqlm.py CHANGED
@@ -56,8 +56,11 @@ from transformers.utils.import_utils import is_torch_fx_available
 from .configuration_llama_aqlm import LlamaConfig
 
 if is_flash_attn_2_available():
-    from flash_attn import flash_attn_func, flash_attn_varlen_func
-    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+    try:
+        from flash_attn import flash_attn_func, flash_attn_varlen_func
+        from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+    except:
+        pass
 
 
 # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
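
For context, the change guards the flash_attn imports so that a broken installation (for example, a wheel built against a mismatched CUDA or torch version) cannot crash the module import even when is_flash_attn_2_available() returns True. Below is a minimal standalone sketch of the pattern; the _FLASH_ATTN_IMPORTED flag and use_flash_attention() helper are hypothetical illustrations and not part of the commit.

try:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
    _FLASH_ATTN_IMPORTED = True  # hypothetical flag, not in the commit
except ImportError:
    # An installed-but-broken flash_attn should not break importing the
    # model file; record the failure instead of raising at import time.
    _FLASH_ATTN_IMPORTED = False

def use_flash_attention() -> bool:
    # Hypothetical helper: call sites branch on the import outcome
    # instead of re-attempting the import.
    return _FLASH_ATTN_IMPORTED

Note that the commit's bare except: also swallows exceptions like KeyboardInterrupt and SystemExit; except ImportError: (or at least except Exception:) is the narrower, more idiomatic guard for an optional dependency.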