from transformers import AutoModel, AutoTokenizer, AutoFeatureExtractor
import torch

# Load pre-trained text and vision models
text_model = AutoModel.from_pretrained("bert-base-uncased")
vision_model = AutoModel.from_pretrained("google/vit-base-patch16-224")

# Define a simple multimodal model
class SimpleMLLM(torch.nn.Module):
    def __init__(self, text_model, vision_model):
        super().__init__()
        self.text_model = text_model
        self.vision_model = vision_model
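        # Project the concatenated [CLS] embeddings of both encoders down to 512 dimensions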
        self.fusion = torch.nn.Linear(text_model.config.hidden_size + vision_model.config.hidden_size, 512)
    
    def forward(self, input_ids, attention_mask, pixel_values):
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
        vision_outputs = self.vision_model(pixel_values=pixel_values)
        
        # Simple fusion: concatenate the [CLS]-token embeddings from the text and vision encoders
        fused = torch.cat([text_outputs.last_hidden_state[:, 0], vision_outputs.last_hidden_state[:, 0]], dim=1)
        output = self.fusion(fused)
        return output

# Initialize the model
model = SimpleMLLM(text_model, vision_model)
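
A brief usage sketch (not part of the original file): it pairs the model above with the tokenizer and feature-extractor checkpoints matching the two encoders, and feeds a blank placeholder image purely for illustration.

from PIL import Image

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

# Preprocess one caption and one (placeholder) image
text_inputs = tokenizer("a photo of a cat", return_tensors="pt")
image_inputs = feature_extractor(images=Image.new("RGB", (224, 224)), return_tensors="pt")

# One forward pass through the fused model
with torch.no_grad():
    fused_features = model(
        input_ids=text_inputs["input_ids"],
        attention_mask=text_inputs["attention_mask"],
        pixel_values=image_inputs["pixel_values"],
    )
print(fused_features.shape)  # torch.Size([1, 512])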
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]