torinriley committed on
Commit 7a57069 · verified · 1 Parent(s): 96bca39

Create handler.py

Files changed (1): handler.py +91 -0
handler.py ADDED
@@ -0,0 +1,91 @@
+import torch
+from typing import Dict, List, Any
+from tokenizers import Tokenizer
+from model import build_transformer
+import warnings
+
+warnings.simplefilter("ignore", category=FutureWarning)
+
+class EndpointHandler:
+    def __init__(self, path: str = ""):
+        """
+        Initialize the handler: load the model and tokenizers.
+        """
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.device = device
+
+        # Paths to the weights and tokenizers
+        self.model_weights_path = path + "/SAVE.pt"  # Ensure SAVE.pt is uploaded in the repository
+        self.tokenizer_src_path = path + "/tokenizer_en.json"
+        self.tokenizer_tgt_path = path + "/tokenizer_it.json"
+
+        # Load tokenizers
+        self.tokenizer_src = Tokenizer.from_file(self.tokenizer_src_path)
+        self.tokenizer_tgt = Tokenizer.from_file(self.tokenizer_tgt_path)
+
+        # Build the transformer model
+        self.model = build_transformer(
+            src_vocab_size=self.tokenizer_src.get_vocab_size(),
+            tgt_vocab_size=self.tokenizer_tgt.get_vocab_size(),
+            src_seq_len=350,  # Match the trained model's sequence length
+            tgt_seq_len=350,  # Match the trained model's sequence length
+            d_model=512,
+            num_layers=6,
+            num_heads=8,
+            dropout=0.1,
+            d_ff=2048
+        ).to(self.device)
+
+        # Load the pretrained weights
+        print(f"Loading weights from: {self.model_weights_path}")
+        checkpoint = torch.load(self.model_weights_path, map_location=self.device)
+        self.model.load_state_dict(checkpoint["model_state_dict"])
+        self.model.eval()
+
+    @torch.no_grad()  # inference only; gradients are never needed here
+    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """
+        Process the incoming request and return the translation.
+        """
+        try:
+            inputs = data.get("inputs", "")
+            if not inputs:
+                return [{"error": "No 'inputs' provided in request"}]
+
+            # Precompute the encoder output
+            source = self.tokenizer_src.encode(inputs)
+            # Truncate long inputs so [SOS] + tokens + [EOS] still fit in the 350-token window,
+            # otherwise the padding count below would go negative
+            source_ids = source.ids[:350 - 2]
+            source = torch.cat([
+                torch.tensor([self.tokenizer_src.token_to_id("[SOS]")], dtype=torch.int64),
+                torch.tensor(source_ids, dtype=torch.int64),
+                torch.tensor([self.tokenizer_src.token_to_id("[EOS]")], dtype=torch.int64),
+                torch.tensor([self.tokenizer_src.token_to_id("[PAD]")] * (350 - len(source_ids) - 2), dtype=torch.int64)
+            ], dim=0).to(self.device)
+            source_mask = (source != self.tokenizer_src.token_to_id("[PAD]")).unsqueeze(0).unsqueeze(1).int().to(self.device)
+            encoder_output = self.model.encode(source, source_mask)
+
+            # Generate translation word by word
+            decoder_input = torch.empty(1, 1).fill_(self.tokenizer_tgt.token_to_id("[SOS]")).type_as(source).to(self.device)
+            predicted_words = []
+
+            while decoder_input.size(1) < 350:
+                # Causal mask: 1 on and below the diagonal (positions the decoder may
+                # attend to), 0 above it, matching the 1-means-keep convention of source_mask
+                decoder_mask = (torch.triu(
+                    torch.ones((1, decoder_input.size(1), decoder_input.size(1))),
+                    diagonal=1
+                ) == 0).type_as(source_mask).to(self.device)
+                out = self.model.decode(encoder_output, source_mask, decoder_input, decoder_mask)
+
+                # Project next token
+                prob = self.model.project(out[:, -1])
+                _, next_word = torch.max(prob, dim=1)
+                decoder_input = torch.cat(
+                    [decoder_input, torch.empty(1, 1).type_as(source).fill_(next_word.item()).to(self.device)], dim=1)
+
+                decoded_word = self.tokenizer_tgt.decode([next_word.item()])
+                if next_word == self.tokenizer_tgt.token_to_id("[EOS]"):
+                    break
+
+                predicted_words.append(decoded_word)
+
+            predicted_translation = " ".join(predicted_words).replace("[EOS]", "").strip()
+            return [{"translation": predicted_translation}]
+        except Exception as e:
+            return [{"error": str(e)}]
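For a quick sanity check outside the endpoint runtime, the handler can be exercised directly. The snippet below is a minimal sketch, assuming the repository files (SAVE.pt, tokenizer_en.json, tokenizer_it.json, model.py) sit in the working directory; the example sentence is purely illustrative:

    # Hypothetical local smoke test for handler.py; run from the repository root.
    from handler import EndpointHandler

    handler = EndpointHandler(path=".")

    # The handler expects a payload of the form {"inputs": "<English text>"}
    result = handler({"inputs": "How are you today?"})
    print(result)  # e.g. [{"translation": "..."}] or [{"error": "..."}]

Deployed as an inference endpoint, the same payload shape applies: a request body carrying "inputs" returns a single-element list with either a "translation" or an "error" key, as defined in __call__ above.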