a686d380 committed on
Commit 056479f
1 Parent(s): 7e192c1

Delete bench.py

Files changed (1)
  1. bench.py +0 -84
bench.py DELETED
@@ -1,84 +0,0 @@
- ########################################################################################################
- # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
- ########################################################################################################
-
- print('\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n')
-
- import os, re
- import json
-
- os.environ['RWKV_JIT_ON'] = '1' #### set these before importing RWKV
- os.environ["RWKV_CUDA_ON"] = '0' #### set to '1' to compile the CUDA kernel (10x faster) - requires a C++ compiler & CUDA libraries
-
- from rwkv.model import RWKV #### pip install rwkv --upgrade
- from rwkv.utils import PIPELINE, PIPELINE_ARGS
-
- MODEL_FILE = './model/rwkv-29'
-
- model = RWKV(model=MODEL_FILE, strategy='cuda bf16')
- pipeline = PIPELINE(model, "rwkv_vocab_v20230424") #### vocab for rwkv-4-world models
-
-
- def my_qa_generator(ctx, length):
-     out_tokens = []
-     out_len = 0
-     out_str = ''
-     occurrence = {}
-     state = None
-     for i in range(length):
-
-         if i == 0:
-             out, state = pipeline.model.forward(pipeline.encode(ctx), state)
-         else:
-             out, state = pipeline.model.forward([token], state)
-
-         for n in occurrence: out[n] -= (0.4 + occurrence[n] * 0.4) #### higher repetition penalty because of the low top_p used here
-
-         token = pipeline.sample_logits(out, temperature=1.0, top_p=0.2) #### sample the next token
-
-         if token == 0: break #### exit at token [0] = <|endoftext|>
-
-         out_tokens += [token]
-
-         for n in occurrence: occurrence[n] *= 0.996 #### decay repetition penalty
-         occurrence[token] = 1 + (occurrence[token] if token in occurrence else 0)
-
-         tmp = pipeline.decode(out_tokens[out_len:])
-         if ('\ufffd' not in tmp) and (not tmp.endswith('\n')): #### append only when the decoded chunk is valid UTF-8 and does not end with \n
-             out_str += tmp
-             #print(tmp, end='', flush=True)
-             out_len = i + 1
-         elif '\n\n' in tmp: #### exit at '\n\n'
-             tmp = tmp.rstrip()
-             out_str += tmp
-             #print(tmp, end='', flush=True)
-             break
-     return out_str.strip()
-
- def bench():
-
-     data = json.load(open('heval_v1.json', 'r', encoding='utf-8'))
-     yes = 0
-     for i, q in enumerate(data):
-         question = q['question']
-         ctx = my_qa_generator(question, 6)
-         #ctx = tokenizer.tokenizer.decode(ctx)
-         flag = False
-         for ans in q['answer']:
-             if ctx[:len(ans)] == ans:
-                 yes += 1
-                 flag = True
-                 print(i, yes, len(data), yes/(i+1))
-
-         if not flag:
-             ans = q['answer'][0]
-             print(q['id'])
-             print(question)
-             print(ctx[:len(ans)], ans)
-             print('-'*20)
-
-     print(yes, len(data), yes/len(data))
-
-
- bench()
-
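For reference, the deleted bench() loads heval_v1.json as a list of records and scores a question as correct when the generated text starts with one of its accepted answer strings. Judging only from the keys the code reads ('id', 'question', 'answer'), one record would look roughly like the hypothetical sketch below; the field values are illustrative, not taken from the actual dataset.

# Hypothetical shape of a single heval_v1.json record, inferred from the keys bench() accesses.
# Only the key names are implied by the code; the values here are made up for illustration.
example_record = {
    "id": "q-0001",                                        # printed when no answer prefix matches
    "question": "Q: What is the capital of France?\nA:",   # prompt passed to my_qa_generator()
    "answer": [" Paris", "Paris"],                         # accepted prefixes, compared via ctx[:len(ans)] == ans
}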