# Example: Chinese text generation with uer/gpt2-chinese-cluecorpussmall
# (page-scrape metadata removed: file size 374 Bytes; revisions be4383d, f0cd3d6)
"""Generate Chinese text from a prompt with UER's GPT-2 (CLUECorpusSmall).

Downloads the pretrained model/tokenizer from the Hugging Face Hub on first
run, then samples a continuation of the prompt and prints it.
"""
from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline

# NOTE: this model ships a BERT-style character-level Chinese vocab, so
# BertTokenizer (not GPT2Tokenizer) is the correct tokenizer for it.
tokenizer = BertTokenizer.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
model = GPT2LMHeadModel.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
text_generator = TextGenerationPipeline(model, tokenizer)

# do_sample=True -> stochastic decoding: output differs run to run.
# Bug fix: the original discarded the pipeline's return value, so the script
# produced no visible output. Capture it and print the generated text.
outputs = text_generator("这是很久之前的事情了", max_length=100, do_sample=True)
print(outputs[0]["generated_text"])