shunxing1234 committed · Commit 3554f56 · Parent(s): 14ae5a4
Update README.md
README.md CHANGED
@@ -61,6 +61,34 @@ with torch.no_grad():
     print(out)
 ```
 
+利用NBCE进行推理/Inference with NBCE
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+from cyg_conversation import covert_prompt_to_input_ids_with_history
+
+tokenizer = AutoTokenizer.from_pretrained("BAAI/AquilaChat-7B")
+model = AutoModelForCausalLM.from_pretrained("BAAI/AquilaChat-7B")
+model.eval()
+model.to("cuda:0")
+vocab = tokenizer.vocab
+print(len(vocab))
+
+text = "请给出10个要到北京旅游的理由。"
+
+tokens = covert_prompt_to_input_ids_with_history(text, history=[], tokenizer=tokenizer, max_token=512)
+
+tokens = torch.tensor(tokens)[None,].to("cuda:0")
+
+
+with torch.no_grad():
+    out = model.generate(tokens, do_sample=True, max_length=512, eos_token_id=100007)[0]
+
+out = tokenizer.decode(out.cpu().numpy().tolist())
+
+print(out)
+```
 
 
 ## 证书/License
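For background (not part of this commit): NBCE, Naive Bayes-based Context Extension, runs the model over several context chunks in parallel and merges their next-token predictions with an unconditional prediction in a naive-Bayes fashion; the generation call added above does not show that merging step itself. Below is a minimal sketch of the combination step only, under the assumption that per-chunk logits have already been computed; the function name `nbce_combine`, the `beta` value, and the random logits are illustrative, not code from the Aquila repository.

```python
# Hedged sketch of the NBCE logits-combination step (illustrative, not from this commit).
import torch
import torch.nn.functional as F

def nbce_combine(chunk_logits: torch.Tensor, uncond_logits: torch.Tensor, beta: float = 0.25) -> torch.Tensor:
    """Merge next-token logits from several context chunks with an unconditional prediction.

    chunk_logits: (num_chunks, vocab_size) logits, one row per context chunk
    uncond_logits: (vocab_size,) logits from the prompt without extra context
    """
    logp = F.log_softmax(chunk_logits, dim=-1)          # per-chunk log-probabilities
    logp_uncond = F.log_softmax(uncond_logits, dim=-1)  # unconditional log-probabilities

    # Pool by taking the most confident (lowest-entropy) chunk.
    entropy = -(logp.exp() * logp).sum(dim=-1)
    pooled = logp[entropy.argmin()]

    # Naive-Bayes-style combination: boost the pooled prediction, subtract the unconditional one.
    return (1 + beta) * pooled - beta * logp_uncond

# Toy usage with random logits over an 8-token vocabulary.
combined = nbce_combine(torch.randn(4, 8), torch.randn(8))
next_token = combined.argmax().item()
```

Taking the lowest-entropy chunk as the pooled prediction is one common pooling choice; averaging the per-chunk log-probabilities is another.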