anicolson committed
Commit de123c1
1 Parent(s): cd84c0c

Upload model

config.json CHANGED
@@ -2249,5 +2249,5 @@
   "model_type": "vision-encoder-decoder",
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.39.0"
+  "transformers_version": "4.41.2"
 }
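The only substantive change in config.json is the `transformers_version` field, bumped from 4.39.0 to 4.41.2; `save_pretrained` records the library version that wrote the config. As a minimal sketch (the repo id is a placeholder, and `trust_remote_code=True` is an assumption based on the architecture living in the repo's own modelling_longitudinal.py), loading the updated checkpoint would look like:

```python
from transformers import AutoModel

# Placeholder repo id -- substitute the actual Hub repository for this commit.
model = AutoModel.from_pretrained(
    "anicolson/longitudinal-model",  # hypothetical id
    trust_remote_code=True,  # assumed: custom code in modelling_longitudinal.py
)
print(model.config.transformers_version)  # expected "4.41.2" after this commit
```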
generation_config.json CHANGED
@@ -1,5 +1,5 @@
 {
   "_from_model_config": true,
   "pad_token_id": 0,
-  "transformers_version": "4.39.0"
+  "transformers_version": "4.41.2"
 }
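The same version bump is mirrored in generation_config.json; the functional fields (`pad_token_id: 0`, `_from_model_config: true`) are unchanged. A small sketch of inspecting it, again with a hypothetical repo id:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("anicolson/longitudinal-model")  # hypothetical id
assert gen_config.pad_token_id == 0  # unchanged by this commit
```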
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0aa61ab9858781f0c3846d46c55e32d25a89433f3bc308124ded02ff75352413
+oid sha256:2b6f5a6bef76e2e970e390b206919fd82c44b04ef6d7d4f7908c9e085192fbfb
 size 450117792
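model.safetensors is tracked with Git LFS, so the diff touches only the pointer file: the `oid sha256:` digest changes with the re-uploaded weights, while the size (450117792 bytes, ~450 MB) is identical. A sketch for verifying a downloaded copy against the new pointer (the local path is illustrative):

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256, the digest used in an LFS pointer's oid field."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Illustrative local path; the expected digest is the oid introduced by this commit.
expected = "2b6f5a6bef76e2e970e390b206919fd82c44b04ef6d7d4f7908c9e085192fbfb"
assert lfs_sha256("model.safetensors") == expected
```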
modelling_longitudinal.py CHANGED
@@ -7,14 +7,13 @@ import torch
 import transformers
 from peft import LoraConfig, TaskType, get_peft_config, get_peft_model
 from torch.nn import CrossEntropyLoss
-from transformers import (AutoModel, PreTrainedTokenizerFast,
-                          VisionEncoderDecoderModel)
+from transformers import AutoModel, PreTrainedTokenizerFast, VisionEncoderDecoderModel
 from transformers.configuration_utils import PretrainedConfig
-from transformers.modeling_outputs import (BaseModelOutput, ModelOutput,
-                                           Seq2SeqLMOutput)
+from transformers.modeling_outputs import BaseModelOutput, ModelOutput, Seq2SeqLMOutput
 from transformers.modeling_utils import PreTrainedModel
-from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import \
-    VisionEncoderDecoderConfig
+from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import (
+    VisionEncoderDecoderConfig,
+)
 from transformers.utils import logging
 
 logger = logging.get_logger(__name__)
@@ -59,6 +58,7 @@ class MultiCvtWithProjectionHead(transformers.CvtPreTrainedModel):
         pixel_values: Optional[torch.Tensor] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
     ) -> Union[Tuple, ModelOutput]:
 
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
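The Python changes are almost entirely import restyling; the one interface change is the new `output_attentions` parameter on `MultiCvtWithProjectionHead.forward`. The method body is not part of this hunk, so the following is only a sketch of the likely motivation: `VisionEncoderDecoderModel.forward` passes `output_attentions` through to its encoder, so a custom encoder's forward must accept the kwarg even when, as with CvT, there are no attention maps to return.

```python
from typing import Optional, Tuple, Union

import torch
from transformers.modeling_outputs import ModelOutput


class EncoderForwardSketch:
    """Hypothetical stand-in for MultiCvtWithProjectionHead; the real body is not in this diff."""

    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_attentions: Optional[bool] = None,  # new in this commit
    ) -> Union[Tuple, ModelOutput]:
        # VisionEncoderDecoderModel.forward calls
        #   self.encoder(pixel_values=..., output_attentions=..., ...)
        # so the kwarg must be accepted; CvtModel exposes no attention
        # outputs, so the flag can simply be absorbed without being forwarded.
        ...
```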