# NOTE(review): removed page-scrape artifacts that preceded the code
# (file-size banner, commit hash, and a copied line-number gutter) —
# they were not part of the program and made the file invalid Python.
from transformers import AutoModelForCausalLM, AutoTokenizer


def main() -> None:
    """Run an interactive GPT-2 chat loop on stdin until the user types 'exit'."""
    # Load the model and tokenizer once, before the loop.
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model.eval()  # inference only — disable dropout

    while True:
        # Read the user's prompt.
        try:
            prompt = input("Enter your prompt (or type 'exit' to quit): ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C should exit cleanly, not with a traceback.
            print("\nExiting the chatbot.")
            break

        # strip() so "  exit " also quits.
        if prompt.strip().lower() == "exit":
            print("Exiting the chatbot.")
            break

        # Build input_ids and attention_mask from the entered prompt.
        inputs = tokenizer(prompt, return_tensors="pt")

        # Generate text with the model. max_new_tokens bounds only the
        # continuation, so long prompts still get a response (the previous
        # max_length=100 counted the prompt's tokens toward the limit).
        gen_tokens = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # avoids the missing-mask warning
            do_sample=True,
            temperature=0.9,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token
        )

        # Decode the model's response (prompt + continuation).
        gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]
        print("Response:", gen_text)


if __name__ == "__main__":
    # Guard so importing this module no longer starts the blocking REPL.
    main()