Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer


def main() -> None:
    """Run an interactive GPT-2 chatbot loop on stdin/stdout.

    Loads the model and tokenizer once, then repeatedly reads a prompt,
    generates a sampled continuation, and prints it — until the user
    types 'exit' (or closes stdin).
    """
    # Load the model and tokenizer once, outside the prompt loop.
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")

    while True:
        # Read a prompt from the user.
        try:
            prompt = input("Enter your prompt (or type 'exit' to quit): ")
        except (EOFError, KeyboardInterrupt):
            # Treat Ctrl-D / Ctrl-C at the prompt as a clean exit
            # instead of crashing with a traceback.
            print("Exiting the chatbot.")
            break

        if prompt.lower() == "exit":
            print("Exiting the chatbot.")
            break

        if not prompt.strip():
            # An empty prompt would make GPT-2 sample unconditioned noise;
            # just ask again.
            continue

        # Build input_ids and attention_mask from the entered prompt.
        inputs = tokenizer(prompt, return_tensors="pt")

        # Generate text with the model.
        gen_tokens = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # pass the mask explicitly to silence the warning
            do_sample=True,
            temperature=0.9,
            max_length=100,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
        )

        # Decode the model's response.
        gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]
        print("Response:", gen_text)


if __name__ == "__main__":
    main()