from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Global variables to cache the model, tokenizer, and generation pipeline
model = None
tokenizer = None
nlp = None


def init():
    global model, tokenizer, nlp
    # Load the model and tokenizer from the current working directory
    model_name_or_path = "."
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    nlp = pipeline("text2text-generation", model=model, tokenizer=tokenizer)


def inference(payload):
    inputs = payload.get("inputs", "")
    if not inputs:
        return {"error": "No inputs provided"}
    # Run the generation pipeline and return its output
    outputs = nlp(inputs, max_length=256)
    return outputs
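

# Hypothetical local smoke test (an assumption, not part of the original file):
# a minimal sketch of how init() and inference() could be exercised directly,
# assuming the model and tokenizer files are present in the current directory.
if __name__ == "__main__":
    init()
    # Example prompt; adjust to whatever task this seq2seq model was trained for
    print(inference({"inputs": "translate English to German: Hello, world!"}))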