add top_k and typical_p sampling
app.py
CHANGED
@@ -23,7 +23,7 @@ if 'ON_COLAB' in os.environ and os.environ['ON_COLAB'] == '1':
     model = RWKV(model=model_path, strategy='cuda bf16')
 else:
     model = RWKV(model=model_path, strategy='cpu bf16')
-from rwkv.utils import PIPELINE, PIPELINE_ARGS
+from utils import PIPELINE, PIPELINE_ARGS
 pipeline = PIPELINE(model, "20B_tokenizer.json")
 
 def infer(
@@ -31,10 +31,12 @@ def infer(
         token_count=10,
         temperature=0.7,
         top_p=1.0,
+        top_k=50,
+        typical_p=1.0,
         presencePenalty = 0.05,
         countPenalty = 0.05,
 ):
-    args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
+    args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p), top_k = int(top_k), typical_p = float(typical_p),
                      alpha_frequency = countPenalty,
                      alpha_presence = presencePenalty,
                      token_ban = [0], # ban the generation of some tokens
@@ -63,7 +65,7 @@ def infer(
         for n in occurrence:
             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
 
-        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k, typical_p=args.typical_p)
         if token in args.token_stop:
             break
         all_tokens += [token]
@@ -88,8 +90,8 @@ examples = [
 
 女招待:是吗。那真是太好了
 
-我因为撰稿的需要,而造访了这间位于信州山间的温泉宿驿。""", 200, 2.0, 0.4, 0.1, 0.1],
-    ["翡翠:欢迎回来,志贵少爷。", 200, 2.0, 0.4, 0.1, 0.1],
+我因为撰稿的需要,而造访了这间位于信州山间的温泉宿驿。""", 200, 2.0, 0.4, 0, 1.0, 0.1, 0.1],
+    ["翡翠:欢迎回来,志贵少爷。", 200, 2.0, 0.4, 0, 1.0, 0.1, 0.1],
     ["""莲华:你的目的,就是这个万华镜吧?
 
 莲华拿出了万华镜。
@@ -105,7 +107,7 @@ examples = [
 
 深见:请让我好好看看……
 
-我刚想把手伸过去,莲华就一下子把它收了回去。""", 200, 2.0, 0.4, 0.1, 0.1],
+我刚想把手伸过去,莲华就一下子把它收了回去。""", 200, 2.0, 0.4, 0, 1.0, 0.1, 0.1],
     ["""嘉祥:偶尔来一次也不错。
 
 我坐到客厅的沙发上,拍了拍自己的大腿。
@@ -122,7 +124,7 @@ examples = [
 
 我摸摸各自占据住我左右两腿的两颗猫头。
 
-嘉祥:开心归开心,拜托你们俩别一直乱动啊,很危险的。""", 200, 2.0, 0.4, 0.1, 0.1],
+嘉祥:开心归开心,拜托你们俩别一直乱动啊,很危险的。""", 200, 2.0, 0.4, 0, 1.0, 0.1, 0.1],
 ]
 
 iface = gr.Interface(
@@ -150,6 +152,8 @@ iface = gr.Interface(
         gr.Slider(10, 200, step=10, value=200, label="token_count 每次生成的长度"), # token_count
         gr.Slider(0.2, 2.0, step=0.1, value=2, label="temperature 默认0.7,高则变化丰富,低则保守求稳"), # temperature
         gr.Slider(0.0, 1.0, step=0.05, value=0.4, label="top_p 默认1.0,高则标新立异,低则循规蹈矩"), # top_p
+        gr.Slider(0, 500, step=1, value=0, label="top_k 默认0(不过滤),0以上时高则标新立异,低则循规蹈矩"), # top_k
+        gr.Slider(0.05, 1.0, step=0.05, value=1.0, label="typical_p 默认1.0,高则保留模型天性,低则试图贴近人类典型习惯"), # typical_p
         gr.Slider(0.0, 1.0, step=0.1, value=0.1, label="presencePenalty 默认0.0,避免写过的类似字"), # presencePenalty
         gr.Slider(0.0, 1.0, step=0.1, value=0.1, label="countPenalty 默认0.0,额外避免写过多次的类似字"), # countPenalty
     ],
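The wiring above is positional: gr.Interface hands the slider values to infer() in the order the inputs are declared, which is why every example row gains two extra values, 0 for top_k and 1.0 for typical_p, between top_p and presencePenalty. The sketch below spells out that mapping; it is illustrative only, the variable names (prompt, presencePenalty, ...) are mine rather than taken from app.py.

# Illustrative only: the order in which infer() now receives its inputs, matching the
# slider order above and the extra "0, 1.0" inserted into every example row.
prompt, token_count, temperature, top_p, top_k, typical_p, presencePenalty, countPenalty = (
    "翡翠:欢迎回来,志贵少爷。", 200, 2.0, 0.4, 0, 1.0, 0.1, 0.1)

# With these slider defaults the new filters are inert: in utils.sample_logits,
# top_k=0 skips the top-k cutoff and typical_p=1.0 skips the typicality cutoff,
# so generation reproduces the previous top-p-only behaviour.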
utils.py
ADDED
@@ -0,0 +1,125 @@
+import json, time, random, os
+import numpy as np
+import torch
+from torch.nn import functional as F
+
+class PIPELINE_ARGS():
+    def __init__(self, temperature=1.0, top_p=0.85, top_k=0, typical_p=1, alpha_frequency=0.2, alpha_presence=0.2, token_ban=[], token_stop=[], chunk_len=256):
+        self.temperature = temperature
+        self.top_p = top_p
+        self.top_k = top_k
+        self.typical_p = typical_p
+        self.alpha_frequency = alpha_frequency # Frequency Penalty (as in GPT-3)
+        self.alpha_presence = alpha_presence # Presence Penalty (as in GPT-3)
+        self.token_ban = token_ban # ban the generation of some tokens
+        self.token_stop = token_stop # stop generation whenever you see any token here
+        self.chunk_len = chunk_len # split input into chunks to save VRAM (shorter -> slower)
+
+class PIPELINE():
+    def __init__(self, model, WORD_NAME):
+        self.model = model
+        if WORD_NAME == 'cl100k_base':
+            import tiktoken
+            self.tokenizer = tiktoken.get_encoding(WORD_NAME)
+        else:
+            from tokenizers import Tokenizer
+            self.tokenizer = Tokenizer.from_file(WORD_NAME)
+
+    def refine_context(self, context):
+        context = context.strip().split('\n')
+        for c in range(len(context)):
+            context[c] = context[c].strip().strip('\u3000').strip('\r')
+        context = list(filter(lambda c: c != '', context))
+        context = '\n' + ('\n'.join(context)).strip()
+        if context == '':
+            context = '\n'
+        return context
+
+    def encode(self, x):
+        if 'tiktoken' in str(type(self.tokenizer)):
+            return self.tokenizer.encode(x)
+        else:
+            return self.tokenizer.encode(x).ids
+
+    def decode(self, x):
+        return self.tokenizer.decode(x)
+
+    def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0, typical_p=1):
+        probs = F.softmax(logits.float(), dim=-1)
+        top_k = int(top_k)
+        if typical_p < 1:
+            entropy = torch.nansum(-torch.log(probs) * probs, dim=-1, keepdim=True)
+            typical_scores = torch.abs(logits - entropy)
+            typical_sorted_ids = torch.argsort(typical_scores)
+            sorted_typical_scores = typical_scores[typical_sorted_ids]
+            typical_sorted_probs = probs[typical_sorted_ids]
+            cum_typical_sorted_probs = torch.cumsum(typical_sorted_probs, dim=-1).cpu().numpy()
+            typical_cutoff = float(sorted_typical_scores[np.argmax(cum_typical_sorted_probs > typical_p)])
+        if probs.device == torch.device('cpu'):
+            probs = probs.numpy()
+            sorted_ids = np.argsort(probs)
+            sorted_probs = probs[sorted_ids][::-1]
+            cumulative_probs = np.cumsum(sorted_probs)
+            cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
+            probs[probs < cutoff] = 0
+            if top_k < len(probs) and top_k > 0:
+                probs[sorted_ids[:-top_k]] = 0
+            if typical_p < 1:
+                probs[typical_scores > typical_cutoff] = 0
+            if temperature != 1.0:
+                probs = probs ** (1.0 / temperature)
+            probs = probs / np.sum(probs)
+            out = np.random.choice(a=len(probs), p=probs)
+            return int(out)
+        else:
+            sorted_ids = torch.argsort(probs)
+            sorted_probs = probs[sorted_ids]
+            sorted_probs = torch.flip(sorted_probs, dims=(0,))
+            cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
+            cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
+            probs[probs < cutoff] = 0
+            if top_k < len(probs) and top_k > 0:
+                probs[sorted_ids[:-top_k]] = 0
+            if typical_p < 1:
+                probs[typical_scores > typical_cutoff] = 0
+            if temperature != 1.0:
+                probs = probs ** (1.0 / temperature)
+            out = torch.multinomial(probs, num_samples=1)[0]
+            return int(out)
+
+    def generate(self, ctx, token_count=100, args=PIPELINE_ARGS(), callback=None, state=None):
+        all_tokens = []
+        out_last = 0
+        out_str = ''
+        occurrence = {}
+        for i in range(token_count):
+
+            # forward & adjust prob.
+            tokens = self.encode(ctx) if i == 0 else [token]
+            while len(tokens) > 0:
+                out, state = self.model.forward(tokens[:args.chunk_len], state)
+                tokens = tokens[args.chunk_len:]
+
+            for n in args.token_ban:
+                out[n] = -float('inf')
+            for n in occurrence:
+                out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+            # sampler
+            token = self.sample_logits(out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k, typical_p=args.typical_p)
+            if token in args.token_stop:
+                break
+            all_tokens += [token]
+            if token not in occurrence:
+                occurrence[token] = 1
+            else:
+                occurrence[token] += 1
+
+            # output
+            tmp = self.decode(all_tokens[out_last:])
+            if '\ufffd' not in tmp: # is valid utf-8 string?
+                if callback:
+                    callback(tmp)
+                out_str += tmp
+                out_last = i + 1
+        return out_str
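To make the new sampling path easier to follow, here is a small self-contained NumPy sketch of the CPU branch of sample_logits (written for this note, not code from the commit; the helper name sample_filtered and the toy logits are made up). It applies the three filters in the same order as above: the top-p nucleus cutoff, then the top-k cutoff, then the typicality cutoff, followed by temperature scaling and renormalization. Note that this file scores typicality as |logit - entropy| on the raw logits, whereas the usual locally typical sampling formulation scores |-log p - entropy|; the two differ by the log-normalizing constant.

import numpy as np

def sample_filtered(logits, temperature=1.0, top_p=0.85, top_k=0, typical_p=1.0, seed=0):
    # Illustrative re-derivation of PIPELINE.sample_logits (CPU path), same filter order.
    logits = np.asarray(logits, dtype=np.float64)
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()                                   # softmax over the vocabulary

    # top-p: keep the smallest set of most-likely tokens whose mass exceeds top_p
    desc = np.sort(probs)[::-1]
    cutoff = desc[np.argmax(np.cumsum(desc) > top_p)]
    probs[probs < cutoff] = 0

    # top-k: additionally keep only the top_k most likely tokens (0 = filter disabled)
    if 0 < top_k < len(probs):
        probs[np.argsort(probs)[:-top_k]] = 0

    # typical: keep tokens whose score |logit - entropy| falls inside the most typical
    # typical_p share of the probability mass (1.0 = filter disabled)
    if typical_p < 1.0:
        p = np.exp(logits - logits.max())
        p /= p.sum()
        entropy = -np.sum(p * np.log(p))
        score = np.abs(logits - entropy)                   # utils.py scores raw logits here
        order = np.argsort(score)
        t_cutoff = score[order][np.argmax(np.cumsum(p[order]) > typical_p)]
        probs[score > t_cutoff] = 0

    probs = probs ** (1.0 / temperature)                   # temperature after filtering
    probs /= probs.sum()
    return int(np.random.default_rng(seed).choice(len(probs), p=probs))

toy_logits = [3.0, 2.5, 2.0, 0.5, -1.0, -2.0]
print(sample_filtered(toy_logits, top_p=0.85))                            # old top-p-only behaviour
print(sample_filtered(toy_logits, top_p=0.85, top_k=3, typical_p=0.95))   # new filters active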