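"""ONNX Runtime inference for skypro1111/mbart-large-50-verbalization.

Downloads the exported encoder/decoder ONNX graphs from the HuggingFace Hub
and greedily decodes verbalized Ukrainian (uk_UA) text from raw input.
"""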
import onnxruntime
import numpy as np
from transformers import AutoTokenizer
from huggingface_hub import hf_hub_download

verbalizer_model_name = "skypro1111/mbart-large-50-verbalization"

def cache_model_from_hf(repo_id, model_dir="./"):
    """Download ONNX models from HuggingFace Hub."""
    files = ["onnx/encoder_model.onnx", "onnx/decoder_model.onnx", "onnx/decoder_model.onnx_data"]
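    # decoder_model.onnx_data carries the decoder weights as ONNX external
    # data; it must end up next to decoder_model.onnx on disk, which
    # hf_hub_download ensures by preserving the repo's relative paths.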
    
    for file in files:
        hf_hub_download(
            repo_id=repo_id,
            filename=file,
            local_dir=model_dir,
        )


class Verbalizer:
    def __init__(self, device):
        cache_model_from_hf(verbalizer_model_name)

        print("Loading tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(verbalizer_model_name)
        self.tokenizer.src_lang = "uk_UA"
        self.tokenizer.tgt_lang = "uk_UA"

        print("Creating ONNX sessions...")
        self.encoder_session = self.create_onnx_session("onnx/encoder_model.onnx", device == 'cuda')
        self.decoder_session = self.create_onnx_session("onnx/decoder_model.onnx", device == 'cuda')

    def create_onnx_session(self, model_path, use_gpu=True):
        """Create an ONNX inference session."""
        session_options = onnxruntime.SessionOptions()
        session_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        session_options.enable_mem_pattern = True
        session_options.enable_mem_reuse = True
        session_options.intra_op_num_threads = 8
        # session_options.log_severity_level = 1  # uncomment for info-level session logs

        cuda_provider_options = {
            'device_id': 0,
            'arena_extend_strategy': 'kSameAsRequested',  # grow the arena by exactly what each request needs
            # gpu_mem_limit defaults to effectively unlimited; set a byte count here to cap the CUDA arena
            'cudnn_conv_algo_search': 'DEFAULT',
            'do_copy_in_default_stream': True,
        }
        
        if use_gpu and 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
            providers = [('CUDAExecutionProvider', cuda_provider_options)]
        else:
            providers = ['CPUExecutionProvider']
        
        session = onnxruntime.InferenceSession(
            model_path,
            providers=providers,
            sess_options=session_options
        )
        
        return session

    def generate_text(self, text):
        """Generate text for a single input."""
        # Prepare input
        inputs = self.tokenizer(text, return_tensors="np", padding=True, truncation=True, max_length=512)
        input_ids = inputs["input_ids"].astype(np.int64)
        attention_mask = inputs["attention_mask"].astype(np.int64)
        
        # Run encoder
        encoder_outputs = self.encoder_session.run(
            output_names=["last_hidden_state"],
            input_feed={
                "input_ids": input_ids,
                "attention_mask": attention_mask,
            }
        )[0]
        
        # Initialize decoder input
        decoder_input_ids = np.array([[self.tokenizer.pad_token_id]], dtype=np.int64)
        
        # Generate sequence
        for _ in range(512):
            # Run decoder
            decoder_outputs = self.decoder_session.run(
                output_names=["logits"],
                input_feed={
                    "input_ids": decoder_input_ids,
                    "encoder_hidden_states": encoder_outputs,
                    "encoder_attention_mask": attention_mask,
                }
            )[0]
            
            # Get next token
            next_token = decoder_outputs[:, -1:].argmax(axis=-1)
            decoder_input_ids = np.concatenate([decoder_input_ids, next_token], axis=-1)
            
            # Stop once the model emits the end-of-sequence token
            if next_token[0, 0] == self.tokenizer.eos_token_id:
                break
        
        # Decode sequence
        output_text = self.tokenizer.decode(decoder_input_ids[0], skip_special_tokens=True)
        return output_text
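

# Minimal usage sketch; assumes the ONNX files have been (or can be) fetched
# from the Hub. Passing device="cuda" falls back to CPU automatically when the
# CUDA execution provider is unavailable. The sample sentence is illustrative.
if __name__ == "__main__":
    verbalizer = Verbalizer(device="cuda")
    # Ukrainian input with digits for the model to expand into words:
    # "The order will arrive on 31.12.2025."
    sample = "Замовлення прибуде 31.12.2025."
    print(verbalizer.generate_text(sample))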