ppak10 committed
Commit 3e60839 · 1 Parent(s): 572e75b

Adds config and model file for pipeline.

Files changed (2):
  1. config.json +19 -0
  2. model.py +19 -0
config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "model_type": "llama",
+   "model_path": "meta-llama/Llama-3.2-1B",
+   "num_labels": 4,
+   "head_dim": 64,
+   "hidden_size": 2048,
+   "id2label": {
+     "0": "None",
+     "1": "Keyhole",
+     "2": "Lack of Fusion",
+     "3": "Balling"
+   },
+   "label2id": {
+     "None": 0,
+     "Keyhole": 1,
+     "Lack of Fusion": 2,
+     "Balling": 3
+   }
+ }
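
For reference, a minimal sketch of how this config could be consumed (a local read of the committed file is assumed; num_labels, id2label, and label2id are standard PretrainedConfig fields, while the custom model_path key is simply kept as an attribute):

from transformers import LlamaConfig

# Parse the committed config.json; unknown keys such as `model_path`
# are stored as plain attributes on the resulting config object.
config = LlamaConfig.from_json_file("config.json")

print(config.num_labels)   # 4
print(config.id2label[2])  # "Lack of Fusion" (keys are cast to int on load)
print(config.model_path)   # "meta-llama/Llama-3.2-1B"
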
model.py ADDED
@@ -0,0 +1,19 @@
+ import torch.nn as nn
+ from transformers import AutoModel, PreTrainedModel
+
+ class LlamaClassificationModel(PreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.base_model = AutoModel.from_pretrained(config.model_path, config=config)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+         self.config = config
+
+     def forward(self, input_ids, attention_mask, labels=None):
+         outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
+         summed_representation = outputs.last_hidden_state.sum(dim=1)
+         logits = self.classifier(summed_representation)
+         loss = None
+         if labels is not None:
+             loss_fn = nn.BCEWithLogitsLoss()
+             loss = loss_fn(logits, labels.float())
+         return {"loss": loss, "logits": logits}
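
A hedged usage sketch for the model above (it assumes config.json and model.py sit in the working directory, that the gated meta-llama/Llama-3.2-1B weights are accessible, and the input string is purely illustrative):

import torch
from transformers import AutoTokenizer, LlamaConfig

from model import LlamaClassificationModel

config = LlamaConfig.from_json_file("config.json")
tokenizer = AutoTokenizer.from_pretrained(config.model_path)
model = LlamaClassificationModel(config).eval()

# Tokenize a single example and run a forward pass without gradients.
inputs = tokenizer("example build description", return_tensors="pt")
with torch.no_grad():
    outputs = model(input_ids=inputs["input_ids"],
                    attention_mask=inputs["attention_mask"])

# BCEWithLogitsLoss trains independent per-class outputs, so read the
# logits through a sigmoid rather than a softmax.
probs = torch.sigmoid(outputs["logits"])[0]
for idx, p in enumerate(probs.tolist()):
    print(config.id2label[idx], round(p, 3))

Two design notes on the committed code: BCEWithLogitsLoss treats the four defect classes as independent (multi-label), so if exactly one label applies per example, nn.CrossEntropyLoss over integer class indices would be the conventional choice; and summing last_hidden_state over the sequence dimension includes padding positions, so a mask-weighted sum or mean is a common refinement.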