import os

# Cap the CUDA caching allocator's split size to reduce memory fragmentation on
# low-VRAM GPUs; this must be set before torch initializes CUDA, so it comes first.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:32"

from transformers import AutoTokenizer, AutoModel

# Load the tokenizer and model from the local "tunning/chatglm2-6b" path and move the model to the GPU.
tokenizer = AutoTokenizer.from_pretrained("tunning/chatglm2-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("tunning/chatglm2-6b", trust_remote_code=True).cuda()
model = model.eval()

# Run one chat turn ("你好" means "Hello") and print the reply.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)
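
To keep the conversation going, the history returned by the previous call can be passed back into model.chat. A minimal follow-up turn might look like the sketch below (the question text is just an illustrative placeholder):

# Reuse the returned history so the model sees the earlier exchange as context.
follow_up, history = model.chat(tokenizer, "What can you do?", history=history)
print(follow_up)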