Upload model
modelling_longitudinal.py (CHANGED)
@@ -25,7 +25,7 @@ class CvtWithProjectionHeadConfig(transformers.CvtConfig):
 
 
 class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
-
+    last_hidden_state: torch.FloatTensor
     attention_mask: torch.FloatTensor
 
 
@@ -89,7 +89,7 @@ class VariableCvtWithProjectionHead(transformers.CvtPreTrainedModel):
             return projection
 
         return ModelOutputWithProjectionEmbedding(
-
+            last_hidden_state=projection, attention_mask=attention_mask,
         )
 
 
@@ -245,7 +245,7 @@ class LongitudinalPromptVariableCXREncoderDecoderModel(VisionEncoderDecoderModel
             decoder_hidden_states=decoder_outputs.hidden_states,
             decoder_attentions=decoder_outputs.attentions,
             cross_attentions=decoder_outputs.cross_attentions,
-            encoder_last_hidden_state=encoder_outputs.
+            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
             # encoder_hidden_states=encoder_outputs.hidden_states,
             # encoder_attentions=encoder_outputs.attentions,
         )
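Taken together, the hunks give the encoder output the standard transformers field name: ModelOutputWithProjectionEmbedding now carries last_hidden_state alongside attention_mask, VariableCvtWithProjectionHead.forward populates it from the projection, and the encoder-decoder wrapper can pass encoder_last_hidden_state through to its own output. The sketch below reconstructs the output class and its construction from the hunks alone; the @dataclass decorator, the Optional[...] = None defaults, and the dummy tensor shapes are illustrative assumptions, not code taken from the repository.

# Minimal sketch reconstructed from the hunks above; not the repository's file.
# Assumptions: the usual @dataclass + Optional-default pattern for ModelOutput
# subclasses, and dummy tensor shapes chosen only for illustration.
from dataclasses import dataclass
from typing import Optional

import torch
import transformers


@dataclass
class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
    last_hidden_state: Optional[torch.FloatTensor] = None  # projected image features
    attention_mask: Optional[torch.FloatTensor] = None     # mask over those features


# Mirrors the new return statement in VariableCvtWithProjectionHead.forward:
# the projection tensor is exposed under the conventional name last_hidden_state.
projection = torch.randn(2, 49, 768)  # (batch, tokens, hidden size); dummy values
attention_mask = torch.ones(2, 49)    # one mask entry per projected token

encoder_outputs = ModelOutputWithProjectionEmbedding(
    last_hidden_state=projection, attention_mask=attention_mask,
)
print(encoder_outputs.last_hidden_state.shape)  # torch.Size([2, 49, 768])

With the field named last_hidden_state, the third hunk can fill encoder_last_hidden_state=encoder_outputs.last_hidden_state in the wrapper's return value, matching how the stock VisionEncoderDecoderModel.forward reports encoder features.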