Washedashore committed on
Commit 5ee530e · verified · 1 Parent(s): 39a4ed4

Update Dockerfile

Files changed (1):
  1. Dockerfile +52 -1
Dockerfile CHANGED
@@ -1,4 +1,55 @@
- # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ from transformers import AutoModel, AutoTokenizer, AutoFeatureExtractor
+ import torch
+
+ # Load pre-trained text and vision models
+ text_model = AutoModel.from_pretrained("bert-base-uncased")
+ vision_model = AutoModel.from_pretrained("google/vit-base-patch16-224")
+
+ # Define a simple multimodal model
+ class SimpleMLLM(torch.nn.Module):
+     def __init__(self, text_model, vision_model):
+         super().__init__()
+         self.text_model = text_model
+         self.vision_model = vision_model
+         self.fusion = torch.nn.Linear(text_model.config.hidden_size + vision_model.config.hidden_size, 512)
+
+     def forward(self, input_ids, attention_mask, pixel_values):
+         text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
+         vision_outputs = self.vision_model(pixel_values=pixel_values)
+
+         # Simple fusion of text and vision features
+         fused = torch.cat([text_outputs.last_hidden_state[:, 0], vision_outputs.last_hidden_state[:, 0]], dim=1)
+         output = self.fusion(fused)
+         return output
+
+ # Initialize the model
+ model = SimpleMLLM(text_model, vision_model)from transformers import AutoModel, AutoTokenizer, AutoFeatureExtractor
+ import torch
+
+ # Load pre-trained text and vision models
+ text_model = AutoModel.from_pretrained("bert-base-uncased")
+ vision_model = AutoModel.from_pretrained("google/vit-base-patch16-224")
+
+ # Define a simple multimodal model
+ class SimpleMLLM(torch.nn.Module):
+     def __init__(self, text_model, vision_model):
+         super().__init__()
+         self.text_model = text_model
+         self.vision_model = vision_model
+         self.fusion = torch.nn.Linear(text_model.config.hidden_size + vision_model.config.hidden_size, 512)
+
+     def forward(self, input_ids, attention_mask, pixel_values):
+         text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
+         vision_outputs = self.vision_model(pixel_values=pixel_values)
+
+         # Simple fusion of text and vision features
+         fused = torch.cat([text_outputs.last_hidden_state[:, 0], vision_outputs.last_hidden_state[:, 0]], dim=1)
+         output = self.fusion(fused)
+         return output
+
+ # Initialize the model
+ model = SimpleMLLM(text_model, vision_model)
+ read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
  # you will also find guides on how best to write your Dockerfile

  FROM python:3.9
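
For reference, a minimal usage sketch for the SimpleMLLM class added in this diff. It assumes the matching bert-base-uncased tokenizer and google/vit-base-patch16-224 feature extractor (neither is loaded in the committed code), uses a blank dummy image, and continues from the snippet above so that model already exists; it is only an illustration of how the class could be exercised, not part of the commit.

from PIL import Image
import torch
from transformers import AutoTokenizer, AutoFeatureExtractor

# Preprocessors matching the two backbones wrapped by SimpleMLLM (assumed; not loaded in the diff)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

# Dummy inputs: a short caption and a blank 224x224 RGB image
text_inputs = tokenizer("a photo of a cat", return_tensors="pt")
image_inputs = feature_extractor(images=Image.new("RGB", (224, 224)), return_tensors="pt")

# Forward pass through the fused model defined above
model.eval()
with torch.no_grad():
    fused = model(
        input_ids=text_inputs["input_ids"],
        attention_mask=text_inputs["attention_mask"],
        pixel_values=image_inputs["pixel_values"],
    )

print(fused.shape)  # expected: torch.Size([1, 512]), one 512-dimensional fused embedding per example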