from transformers import Trainer, TrainingArguments

# Fine-tuning with the Trainer API.
# Assumes `model`, `train_dataset`, and `eval_dataset` are already defined.
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)
trainer.train()

from transformers import AutoModel, AutoTokenizer, AutoFeatureExtractor
import torch
# Load pre-trained text and vision models
text_model = AutoModel.from_pretrained("bert-base-uncased")
vision_model = AutoModel.from_pretrained("google/vit-base-patch16-224")
# Define a simple multimodal model
class SimpleMLLM(torch.nn.Module):
    def __init__(self, text_model, vision_model):
        super().__init__()
        self.text_model = text_model
        self.vision_model = vision_model
        # Project the concatenated text/vision features into a shared 512-dim space
        self.fusion = torch.nn.Linear(
            text_model.config.hidden_size + vision_model.config.hidden_size, 512
        )

    def forward(self, input_ids, attention_mask, pixel_values):
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
        vision_outputs = self.vision_model(pixel_values=pixel_values)
        # Simple fusion: concatenate the [CLS] token embeddings from both encoders
        fused = torch.cat(
            [text_outputs.last_hidden_state[:, 0], vision_outputs.last_hidden_state[:, 0]],
            dim=1,
        )
        output = self.fusion(fused)
        return output
# Initialize the model
model = SimpleMLLM(text_model, vision_model)
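
# Minimal usage sketch (not from the original file): one way to run a single
# forward pass through SimpleMLLM. The caption string and "example.jpg" path
# are hypothetical placeholders.
from PIL import Image

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

text_inputs = tokenizer("A photo of a cat", return_tensors="pt")
image = Image.open("example.jpg")  # hypothetical local image
pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values

with torch.no_grad():
    fused_features = model(
        input_ids=text_inputs.input_ids,
        attention_mask=text_inputs.attention_mask,
        pixel_values=pixel_values,
    )
print(fused_features.shape)  # expected: torch.Size([1, 512])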

# Evaluate and report perplexity (this assumes the eval loss is a
# language-modeling cross-entropy loss, so exp(loss) is a meaningful perplexity).
import math

eval_results = trainer.evaluate()
print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")

# You would then need to implement data loading, training loop, etc.