from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from src.classmodels.inputforgeneration import InputForGeneration
from src.errorlog.errorlog import log_error
from pathlib import Path

# Model name on Hugging Face: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
current_folderpath = Path(__file__).resolve().parent

tokenizer = None
quantised_model = None
tokenizer_path = None
model_path = None

additional_kwargs = {
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 5,
    "no_repeat_ngram_size": 5,
    "truncation": True
}

TASK_NAME = "text-generation"

def isModelAvailable():
    global model_path
    model_path = current_folderpath / "model"
    # The model folder must actually exist on disk for the check to pass
    return model_path.is_dir()

def isTokenizerAvailable():
    global tokenizer_path
    tokenizer_path = current_folderpath / "tokenizer"
    # The tokenizer folder must actually exist on disk for the check to pass
    return tokenizer_path.is_dir()
    
def warmupTextGenerationModel():
    global tokenizer, quantised_model
    try:
        if isModelAvailable() and isTokenizerAvailable():
            tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
            quantised_model = AutoModelForCausalLM.from_pretrained(model_path, use_safetensors=True)
            return "text generation model is warmed up"
        else:
            return "No model/tokenizer folder found..."
    except Exception as ex:
        log_error(str(ex))
        return "Issue occurred when warming up the text generation model. Please try again..."

def generateText(inputSettings: InputForGeneration):
    try:
        if tokenizer is not None and quantised_model is not None:
            pipe = pipeline(task=TASK_NAME, model=quantised_model, tokenizer=tokenizer, device_map="auto")

            # Prompt formatted for the Llama instruction template
            prompt = f"<s>[INST] {inputSettings.input_for_generation} [/INST]"
            generated_text = pipe(prompt, temperature=inputSettings.temperature, max_length=inputSettings.max_length,
                                  **additional_kwargs)

            if generated_text is not None and generated_text[0]['generated_text'] is not None:
                return generated_text[0]['generated_text'].replace("<s>", "").replace("[INST]", "").replace("[/INST]", "")
            return None
        else:
            # If the tokenizer or model has not been loaded, signal a generation issue
            return None

    except Exception as ex:
        log_error(str(ex))
        return ""