Upload model
modelling_single.py  (CHANGED, +4 -2)
@@ -8,8 +8,9 @@ from transformers import PreTrainedTokenizerFast, VisionEncoderDecoderModel
 from transformers.configuration_utils import PretrainedConfig
 from transformers.modeling_outputs import BaseModelOutput, ModelOutput, Seq2SeqLMOutput
 from transformers.modeling_utils import PreTrainedModel
-from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import
-    VisionEncoderDecoderConfig
+from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import (
+    VisionEncoderDecoderConfig,
+)
 from transformers.utils import logging

 logger = logging.get_logger(__name__)
@@ -54,6 +55,7 @@ class CvtWithProjectionHead(transformers.CvtPreTrainedModel):
         pixel_values: Optional[torch.Tensor] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
     ) -> Union[Tuple, ModelOutput]:

         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
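
For context, a minimal usage sketch of the updated forward signature is shown below. The module path, checkpoint path, and input size are placeholders, and whether output_attentions is actually forwarded to the underlying CvT encoder is not shown in this diff; treat all of these as assumptions.

# Hypothetical usage sketch of the updated CvtWithProjectionHead.forward signature.
# Module/class location and checkpoint path are assumptions, not part of this commit.
import torch

from modelling_single import CvtWithProjectionHead  # assumed import location

model = CvtWithProjectionHead.from_pretrained("path/to/checkpoint")  # placeholder path
pixel_values = torch.randn(1, 3, 384, 384)  # example input; actual size depends on the CvT config

outputs = model(
    pixel_values=pixel_values,
    output_hidden_states=True,
    output_attentions=True,  # keyword newly accepted after this change
    return_dict=True,
)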