import sys
import os

# Make yuan_moe_hf_model, which lives in the parent directory, importable.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

import torch
from transformers import LlamaTokenizer

from yuan_moe_hf_model import YuanForCausalLM
device = "cuda" |
|
|
|
quantized_model_dir = "/temp_data/LLM_test/MOE/Yuan2-M32-int4-hf" |
|
|
|
tokenizer = LlamaTokenizer.from_pretrained('/temp_data/LLM_test/MOE/Yuan2-M32-int4-hf/', add_eos_token=False, add_bos_token=False, eos_token='<eod>') |
|
tokenizer.add_tokens(['<sep>', '<pad>', '<mask>', '<predict>', '<FIM_SUFFIX>', '<FIM_PREFIX>', '<FIM_MIDDLE>','<commit_before>','<commit_msg>','<commit_after>','<jupyter_start>','<jupyter_text>','<jupyter_code>','<jupyter_output>','<empty_output>'], special_tokens=True) |
|
|
|
# Load the int4-quantized MoE checkpoint from safetensors and move it to GPU.
model = YuanForCausalLM.from_pretrained(
    quantized_model_dir,
    trust_remote_code=True,
    use_safetensors=True).to(device)
# Tokenize the prompt "北京是中国的" ("Beijing is China's ..."), generate up to
# 256 new tokens greedily, and decode the full sequence, prompt included.
inputs = tokenizer("北京是中国的", return_tensors="pt").to(device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=256)[0]))
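# Optional: a minimal sketch of sampling-based decoding, assuming the model
# and tokenizer loaded above. The helper name and the temperature/top_p
# defaults are illustrative choices, not part of the original example;
# do_sample, temperature, top_p, and max_new_tokens are standard
# transformers generate() arguments.
def generate_sampled(prompt, temperature=0.8, top_p=0.95, max_new_tokens=256):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    output_ids = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0])

# Example usage: print(generate_sampled("北京是中国的"))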