from transformers import AutoTokenizer
import transformers
import torch
class Model:
    """Thin wrapper around a Hugging Face text-generation pipeline."""

    def __init__(self, model="lmsys/vicuna-7b-v1.5") -> None:
        # Load the tokenizer and build a text-generation pipeline for the
        # given checkpoint, sharding weights across available devices.
        self.tokenizer = AutoTokenizer.from_pretrained(model)
        self.pipeline = transformers.pipeline(
            "text-generation",
            model=model,
            tokenizer=self.tokenizer,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            device_map="auto",
        )
    def gen(self, content, temp=0.1, max_length=500):
        # Sample a single completion; max_length caps the number of newly
        # generated tokens, and return_full_text=False strips the prompt so
        # only the generated continuation is returned.
        sequences = self.pipeline(
            content,
            max_new_tokens=max_length,
            do_sample=True,
            temperature=temp,
            num_return_sequences=1,
            eos_token_id=self.tokenizer.eos_token_id,
            return_full_text=False,
        )
        return sequences[-1]["generated_text"]
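

# A minimal usage sketch, assuming this file is run directly and the
# lmsys/vicuna-7b-v1.5 weights are available locally or downloadable
# from the Hub (the prompt string here is illustrative only).
if __name__ == "__main__":
    llm = Model()  # loads the tokenizer and model on first use
    print(llm.gen("Explain what a tokenizer does in one sentence.", temp=0.3))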