update code in colab (#1)
- update code in colab (c3f3fd3f261f12d4bdc6f7db19c0f5a5ff56bafb)
Co-authored-by: XueHang <[email protected]>
README.md CHANGED
@@ -15,7 +15,7 @@ widget:
 ---
 
 
-ChatYuan-large-v2
+ChatYuan-large-v2 is a large functional dialogue language model supporting both Chinese and English. v2 uses the same technical approach as v1, with further optimization in instruction fine-tuning, reinforcement learning from human feedback, and chain-of-thought.
 
 ChatYuan-large-v2 is a functional dialogue language model that supports bilingual Chinese and English.
 ChatYuan-large-v2 uses the same technical solution as the v1 version, and has been optimized in terms of instruct-tuning, human feedback reinforcement learning and chain-of-thought.
@@ -23,7 +23,7 @@ ChatYuan-large-v2 uses the same technical solution as the v1 version, and has be
 <a href='https://huggingface.co/spaces/ClueAI/ChatYuan-large-v2' target="__blank">Online Demo</a> |
 <a href='https://www.clueai.cn' target="__blank">API (large version)</a> |
 <a href='https://github.com/clue-ai/ChatYuan' target="__blank">GitHub project</a> |
-<a href='https://colab.research.google.com/drive/
+<a href='https://colab.research.google.com/drive/1JTSKy2HntPYHi6UvmUiwdHisFx1UrWlJ?usp=sharing' target="__blank">Try it on Colab</a> |
 <a href='https://mp.weixin.qq.com/s/FtXAnrhavA5u7hRyfm8j6Q' target="__blank">Article introduction</a>
 
 
@@ -74,15 +74,11 @@ Based on the original functions of Chatyuan-large-v1, we optimized the model as
 Load the model:
 
 ```python
-# Load the model
-from transformers import T5Tokenizer, T5ForConditionalGeneration
-tokenizer = T5Tokenizer.from_pretrained("ClueAI/ChatYuan-large-v2")
-model = T5ForConditionalGeneration.from_pretrained("ClueAI/ChatYuan-large-v2")
+# Load the model, straight from paddlenlp
+from paddlenlp.transformers import AutoTokenizer, T5ForConditionalGeneration
+tokenizer = AutoTokenizer.from_pretrained("ClueAI/ChatYuan-large-v2", from_hf_hub=False)
+model = T5ForConditionalGeneration.from_pretrained("ClueAI/ChatYuan-large-v2", from_hf_hub=False)
 # This loading method needs roughly 6+ GB of GPU memory at a max length of 512
-# If GPU memory is insufficient, the following loading method reduces the requirement further, to about 3 GB
-# model = T5ForConditionalGeneration.from_pretrained("ClueAI/ChatYuan-large-v2").half()
-
-
 
 ```
 
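This commit also drops the old half-precision tip. For readers who keep loading through Hugging Face transformers rather than PaddleNLP, the idea in the removed comment still applies; a minimal sketch, assuming the T5 classes from the replaced snippet:

```python
# Half-precision variant of the replaced transformers-based load (per the
# removed comment): cuts GPU memory from roughly 6+ GB to about 3 GB at a
# max length of 512. Assumes the transformers package is installed.
from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("ClueAI/ChatYuan-large-v2")
model = T5ForConditionalGeneration.from_pretrained("ClueAI/ChatYuan-large-v2").half()
```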
@@ -90,10 +86,7 @@ model = T5ForConditionalGeneration.from_pretrained("ClueAI/ChatYuan-large-v2")
 ```python
 # Usage
 import torch
-from transformers import AutoTokenizer
 # Switch the Colab notebook runtime to GPU for faster inference
-device = torch.device('cuda')
-model.to(device)
 def preprocess(text):
     text = text.replace("\n", "\\n").replace("\t", "\\t")
     return text
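Note that the updated snippet keeps `import torch` even though the torch device lines are gone; with PaddleNLP, tensor placement follows Paddle's global device. A short sketch of making that explicit (an addition for illustration, not part of the commit):

```python
# Optional: select the device explicitly for Paddle. By default Paddle uses
# the GPU when a CUDA build of paddlepaddle is installed.
import paddle

paddle.set_device("gpu" if paddle.is_compiled_with_cuda() else "cpu")
```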
@@ -105,12 +98,12 @@ def answer(text, sample=True, top_p=1, temperature=0.7):
     '''sample: whether to sample; for generation tasks this can be set to True.
     top_p: between 0 and 1; larger values give more diverse output'''
     text = preprocess(text)
-    encoding = tokenizer(text=[text], truncation=True, padding=True, max_length=768, return_tensors="pt").to(device)
+    encoding = tokenizer(text=[text], truncation=True, padding=True, max_length=768, return_tensors="pd")
     if not sample:
-        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=512, num_beams=1, length_penalty=0.6)
+        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_length=512, num_beams=1, length_penalty=0.4)
     else:
-        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=512, do_sample=True, top_p=top_p, temperature=temperature, no_repeat_ngram_size=3)
-    out_text = tokenizer.batch_decode(out["sequences"], skip_special_tokens=True)
+        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_length=512, do_sample=True, top_p=top_p, temperature=temperature, no_repeat_ngram_size=3)
+    out_text = tokenizer.batch_decode(out[0], skip_special_tokens=True)
     return postprocess(out_text[0])
 print("end...")
 ```
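Once `answer` is defined as above, the updated snippet can be exercised end to end. A minimal usage sketch; it assumes `postprocess` (referenced here but defined outside this diff) simply inverts `preprocess`, and that the prompt follows the 用户/小元 dialogue format used elsewhere in the README:

```python
# Assumption: postprocess() inverts preprocess(); its body is outside this diff.
def postprocess(text):
    return text.replace("\\n", "\n").replace("\\t", "\t")

# Sampled generation; rerun for a different completion, or pass sample=False
# to take the deterministic beam-search branch instead.
input_text = "用户:帮我写一首关于春天的诗\n小元:"
print(answer(input_text))
```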