anicolson committed on
Commit 9eb471c
1 Parent(s): 1f01463

Upload model

config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_commit_hash": null,
   "architectures": [
     "LongitudinalPromptMultiCXREncoderDecoderModel"
   ],
@@ -78,7 +77,6 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.31.0",
     "type_vocab_size": 2,
     "typical_p": 1.0,
     "use_bfloat16": false,
@@ -2243,7 +2241,6 @@
     "top_p": 1.0,
     "torch_dtype": "float32",
     "torchscript": false,
-    "transformers_version": "4.31.0",
     "typical_p": 1.0,
     "use_bfloat16": false
   },
@@ -2251,5 +2248,5 @@
   "model_type": "vision-encoder-decoder",
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": null
+  "transformers_version": "4.36.2"
 }
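With this change, config.json records a single transformers_version ("4.36.2") at the top level instead of repeating it (alongside a null _commit_hash) inside each sub-config. A minimal sketch to confirm the pinned version, assuming the updated config.json has been downloaded to the working directory:

import json

with open("config.json") as f:  # assumes the updated file is local
    config = json.load(f)

print(config["transformers_version"])  # "4.36.2"
print(config["model_type"])            # "vision-encoder-decoder"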
generation_config.json CHANGED
@@ -1,5 +1,5 @@
 {
   "_from_model_config": true,
   "pad_token_id": 0,
-  "transformers_version": "4.31.0"
+  "transformers_version": "4.36.2"
 }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:384615a2c239c94d47725204477ef2b1e8f3faa0e4f099e7b9b709f2d49c5b50
+size 450117528
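The three added lines are a Git LFS pointer, not the weights themselves: the oid and size identify the ~450 MB file that git lfs pull fetches on checkout. Once the real file is present, a minimal sketch for inspecting it, assuming the safetensors package is installed:

from safetensors import safe_open

# List the first few tensor names and shapes without loading the full checkpoint.
with safe_open("model.safetensors", framework="pt") as f:
    for name in list(f.keys())[:5]:
        print(name, f.get_tensor(name).shape)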
modelling_longitudinal.py CHANGED
@@ -1,5 +1,6 @@
 import os
 import warnings
+from dataclasses import dataclass
 from typing import Any, Optional, Tuple, Union
 
 import torch
@@ -9,7 +10,8 @@ from torch.nn import CrossEntropyLoss
 from transformers import (AutoModel, PreTrainedTokenizerFast,
                           VisionEncoderDecoderModel)
 from transformers.configuration_utils import PretrainedConfig
-from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
+from transformers.modeling_outputs import (BaseModelOutput, ModelOutput,
+                                           Seq2SeqLMOutput)
 from transformers.modeling_utils import PreTrainedModel
 from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import \
     VisionEncoderDecoderConfig
@@ -24,11 +26,6 @@ class CvtWithProjectionHeadConfig(transformers.CvtConfig):
         self.projection_size = projection_size
 
 
-class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
-    last_hidden_state: torch.FloatTensor
-    attention_mask: torch.FloatTensor
-
-
 class CvtProjectionHead(torch.nn.Module):
 
     def __init__(self, config) -> None:
@@ -62,7 +59,7 @@ class MultiCvtWithProjectionHead(transformers.CvtPreTrainedModel):
         pixel_values: Optional[torch.Tensor] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, ModelOutputWithProjectionEmbedding]:
+    ) -> Union[Tuple, ModelOutput]:
 
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
@@ -88,7 +85,7 @@ class MultiCvtWithProjectionHead(transformers.CvtPreTrainedModel):
         if not return_dict:
             return projection
 
-        return ModelOutputWithProjectionEmbedding(
+        return ModelOutput(
             last_hidden_state=projection, attention_mask=attention_mask,
         )
 
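The custom ModelOutputWithProjectionEmbedding container is dropped in favour of the generic transformers.modeling_outputs.ModelOutput, and dataclass is newly imported; recent transformers releases expect ModelOutput subclasses to be @dataclass-decorated, which the removed class was not. For context, a typed output container in the style the transformers documentation shows would look like the sketch below (the class name is hypothetical, not code from this repository):

from dataclasses import dataclass
from typing import Optional

import torch
from transformers.modeling_outputs import ModelOutput


@dataclass
class ProjectionOutput(ModelOutput):  # hypothetical name for illustration
    # Optional fields defaulting to None let ModelOutput.to_tuple() skip unset entries.
    last_hidden_state: Optional[torch.FloatTensor] = None
    attention_mask: Optional[torch.FloatTensor] = None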