Commit bd17d54 · Parent(s): c1b22b4

Initial commit

- README.md +54 -0
- config.json +30 -0
- flax_model.msgpack +3 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- tf_model.h5 +3 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,54 @@
---
thumbnail: https://huggingface.co/front/thumbnails/dialogpt.png
tags:
- conversational
license: mit
---

## A State-of-the-Art Large-scale Pretrained Response Generation Model (DialoGPT)

DialoGPT is a state-of-the-art, large-scale pretrained dialogue response generation model for multi-turn conversations.
The [human evaluation results](https://github.com/dreasysnail/Dialogpt_dev#human-evaluation) indicate that responses generated by DialoGPT are comparable in quality to human responses under a single-turn conversation Turing test.
The model is trained on 147M multi-turn dialogues from Reddit discussion threads.

* Multi-turn generation examples from an interactive environment:

| Role | Response |
|------|----------|
| User | Does money buy happiness? |
| Bot  | Depends how much money you spend on it. |
| User | What is the best way to buy happiness? |
| Bot  | You just have to be a millionaire by your early 20s, then you can be happy. |
| User | This is so difficult! |
| Bot  | You have no idea how hard it is to be a millionaire and happy. There is a reason the rich have a lot of money. |

Please find information about preprocessing, training, and the full details of DialoGPT in the [original DialoGPT repository](https://github.com/microsoft/DialoGPT).

ArXiv paper: [https://arxiv.org/abs/1911.00536](https://arxiv.org/abs/1911.00536)

### How to use

Now we are ready to try out how the model works as a chatting partner!

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# Let's chat for 5 lines
for step in range(5):
    # encode the new user input, add the eos_token, and return a PyTorch tensor
    new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids

    # generate a response while limiting the total chat history to 1000 tokens
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)

    # pretty print the last output tokens from the bot
    print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
```
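The loop above decodes greedily, so the bot tends to repeat itself across turns. As a minimal, self-contained sketch (not part of the original card), the same `generate` call accepts the standard `transformers` sampling flags `do_sample`, `top_k`, and `top_p` for more varied replies:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# single-turn example: one user utterance, terminated by the eos_token
input_ids = tokenizer.encode("Does money buy happiness?" + tokenizer.eos_token, return_tensors="pt")

# sampled decoding instead of the greedy default
reply_ids = model.generate(
    input_ids,
    max_length=1000,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,  # sample from the distribution instead of taking the argmax
    top_k=50,        # consider only the 50 most likely next tokens
    top_p=0.95,      # nucleus sampling: smallest token set covering 95% of the mass
)

# strip the prompt tokens and decode only the bot's reply
print(tokenizer.decode(reply_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True))
```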
config.json
ADDED
@@ -0,0 +1,30 @@
{
  "activation_function": "gelu_new",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 50256,
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 1024,
  "n_embd": 1280,
  "n_head": 20,
  "n_layer": 36,
  "n_positions": 1024,
  "resid_pdrop": 0.1,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "conversational": {
      "max_length": 1000
    }
  },
  "vocab_size": 50257
}
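This config describes the GPT-2 architecture at DialoGPT's "large" size: 36 layers, 1280-dimensional embeddings, 20 attention heads, and a 1024-token context. A small sketch, assuming nothing beyond the standard `transformers` `AutoConfig` API, of reading these values back:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("microsoft/DialoGPT-large")

print(config.model_type)   # "gpt2"
print(config.n_layer)      # 36 transformer blocks
print(config.n_embd)       # 1280-dimensional hidden states
print(config.n_head)       # 20 attention heads per block
print(config.n_positions)  # 1024-token context window

# carries the conversational default: max_length=1000
print(config.task_specific_params)
```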
flax_model.msgpack
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c2f860888da95de393c1549d281e237915e427f929730d37fe25c7416d00f99
size 3096134690
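The binary weight files in this commit are stored with Git LFS, so the repository tracks only pointer files like the one above: a spec version, a SHA-256 object id, and a byte size. A hedged sketch of verifying a downloaded blob against such a pointer; the parser below is an illustration, not an official LFS client:

```python
import hashlib
import os

def parse_lfs_pointer(text):
    # each pointer line is "key value", e.g. "oid sha256:1c2f8608..."
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

def verify(pointer_path, blob_path):
    # compare a downloaded blob against the pointer's size and SHA-256 oid
    with open(pointer_path) as f:
        pointer = parse_lfs_pointer(f.read())
    if os.path.getsize(blob_path) != pointer["size"]:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == pointer["oid"]
```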
merges.txt
ADDED
The diff for this file is too large to render.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1165cf0e23661205408696184a2902b5208a4ff08b08b272d1e3e03d43da85e5
size 1752292117
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76f7877aff5e6b9a04eca613cf806be484c86002fc742b1e3dd2f47fe7bb9594
size 3096617152
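The commit ships the same weights for three frameworks: `pytorch_model.bin`, `tf_model.h5`, and `flax_model.msgpack`. A sketch of loading each with the matching `transformers` auto-class (these are standard `transformers` APIs, not something stated in this commit; TensorFlow and Flax must be installed for the latter two):

```python
# PyTorch weights (pytorch_model.bin)
from transformers import AutoModelForCausalLM
pt_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# TensorFlow weights (tf_model.h5) -- requires tensorflow
from transformers import TFAutoModelForCausalLM
tf_model = TFAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# Flax weights (flax_model.msgpack) -- requires jax/flax
from transformers import FlaxAutoModelForCausalLM
flax_model = FlaxAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
```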
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"model_max_length": 1024}
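`model_max_length` caps the tokenizer's single-input length at 1024 tokens, matching `n_ctx`/`n_positions` in `config.json`. A small sketch, assuming this repo's tokenizer, of where that value surfaces:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
print(tokenizer.model_max_length)  # 1024, read from tokenizer_config.json

# with truncation enabled, encodings are clipped to model_max_length
ids = tokenizer("hello " * 5000, truncation=True)["input_ids"]
print(len(ids))  # <= 1024
```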
vocab.json
ADDED
The diff for this file is too large to render.
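`vocab.json` and `merges.txt` together define GPT-2's byte-level BPE tokenizer: the vocab maps token strings to ids, and the merges file lists BPE merge rules in priority order. A hedged sketch, assuming local copies of the two files from this commit, of constructing the tokenizer directly from them:

```python
from transformers import GPT2Tokenizer

# build the tokenizer straight from the two files in this commit
# (assumes vocab.json and merges.txt are in the working directory)
tokenizer = GPT2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")

ids = tokenizer.encode("Does money buy happiness?")
print(ids)
print(tokenizer.decode(ids))  # round-trips to the original text
```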