from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
model_name = 'DDDSSS/translation_en-zh'
device = 'cpu'
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# The model can also be saved locally with save_pretrained('./test/saved_model/')
# and reloaded from that directory later.
tokenizer = AutoTokenizer.from_pretrained(model_name)
translation = pipeline("translation_en_to_zh", model=model, tokenizer=tokenizer,
                       device=device)
texts = [
    "If nothing is detected and there is a config.json file, it's assumed the library is transformers.",
    "By looking into the presence of files such as *.nemo or saved_model.pb, the Hub can determine if a model is from NeMo or Keras.",
]
results = translation(texts, max_length=512)
print('Translation:', results)
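For reference, a minimal sketch of the local save-and-reload workflow mentioned in the comment above. The './test/saved_model/' directory is only an example path, not a required location:

# Save the downloaded model and tokenizer to a local directory,
# then rebuild the pipeline from that directory instead of the Hub.
save_dir = './test/saved_model/'   # example path, adjust as needed
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)

local_model = AutoModelForSeq2SeqLM.from_pretrained(save_dir)
local_tokenizer = AutoTokenizer.from_pretrained(save_dir)
local_translation = pipeline("translation_en_to_zh", model=local_model,
                             tokenizer=local_tokenizer, device=device)
print(local_translation(["Hello world"], max_length=512))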