########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
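#### bench.py - scores an RWKV-4-World checkpoint on the h-eval question set (heval_v1.json)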
print('\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n')
import os
import json
os.environ['RWKV_JIT_ON'] = '1' #### set these before importing RWKV
os.environ["RWKV_CUDA_ON"] = '0' #### set to '1' to compile the CUDA kernel (10x faster) - requires a C++ compiler & CUDA libraries
from rwkv.model import RWKV #### pip install rwkv --upgrade
from rwkv.utils import PIPELINE, PIPELINE_ARGS
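#### local checkpoint path; the '.pth' extension is conventionally omitted in rwkv examples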
MODEL_FILE = './model/rwkv-29'
model = RWKV(model=MODEL_FILE, strategy='cuda bf16')
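#### other strategy strings such as 'cuda fp16' or 'cpu fp32' should also work here (see the rwkv package docs)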
pipeline = PIPELINE(model, "rwkv_vocab_v20230424") #### vocab for rwkv-4-world models
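#### my_qa_generator: continues `ctx` for up to `length` tokens with low-top_p sampling
#### plus a decaying repetition penalty; generation stops at <|endoftext|> (token 0)
#### or at a blank line ('\n\n')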
def my_qa_generator(ctx, length):
    out_tokens = []
    out_len = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(length):
        if i == 0:
            out, state = pipeline.model.forward(pipeline.encode(ctx), state)
        else:
            out, state = pipeline.model.forward([token], state)
        for n in occurrence:
            out[n] -= (0.4 + occurrence[n] * 0.4) #### higher repetition penalty because of the lower top_p used here
        token = pipeline.sample_logits(out, temperature=1.0, top_p=0.2) #### sample the next token
        if token == 0:
            break #### exit at token [0] = <|endoftext|>
        out_tokens += [token]
        for n in occurrence:
            occurrence[n] *= 0.996 #### decay repetition penalty
        occurrence[token] = 1 + (occurrence[token] if token in occurrence else 0)
        tmp = pipeline.decode(out_tokens[out_len:])
        if ('\ufffd' not in tmp) and (not tmp.endswith('\n')): #### append only when tmp is valid utf-8 and does not end with \n
            out_str += tmp
            #print(tmp, end='', flush=True)
            out_len = i + 1
        elif '\n\n' in tmp: #### exit at '\n\n'
            tmp = tmp.rstrip()
            out_str += tmp
            #print(tmp, end='', flush=True)
            break
    return out_str.strip()
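#### bench: each entry of heval_v1.json is expected to provide 'id', 'question' and a list
#### of accepted answers under 'answer'; a question is scored correct when the model's
#### continuation starts with one of the accepted answers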
def bench():
    data = json.load(open('heval_v1.json', 'r', encoding='utf-8'))
    yes = 0
    for i, q in enumerate(data):
        question = q['question']
        ctx = my_qa_generator(question, 6)
        flag = False
        for ans in q['answer']:
            if ctx[:len(ans)] == ans: #### correct if the generation starts with an accepted answer
                yes += 1
                flag = True
                print(i, yes, len(data), yes / (i + 1)) #### running accuracy so far
                break #### count each question at most once
        if not flag: #### log failed questions for inspection
            ans = q['answer'][0]
            print(q['id'])
            print(question)
            print(ctx[:len(ans)], ans)
            print('-' * 20)
    print(yes, len(data), yes / len(data)) #### final accuracy

bench()
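#### run with: python bench.py (with the checkpoint at MODEL_FILE and heval_v1.json in the working directory)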