|
|
|
|
|
|
|
|
|
# Demo / benchmark setup for an RWKV world model.
# FIX: the banner previously said "RWKV-4-World" while MODEL_FILE below loads
# an RWKV-5-World checkpoint — message updated to match the actual model family.
print('\nHere are some demos for RWKV-5-World models (https://huggingface.co/BlinkDL/rwkv-5-world)\n')

import os, re

import json

# Disable TorchScript JIT and the custom CUDA kernel; both flags are read by
# the rwkv package at import time, so they must be set before importing it.
os.environ['RWKV_JIT_ON'] = '0'

os.environ["RWKV_CUDA_ON"] = '0'

from rwkv.model import RWKV

from rwkv.utils import PIPELINE, PIPELINE_ARGS

# Path to the model checkpoint (no file extension — the loader appends it).
MODEL_FILE = '../../RWKV-5-World-3B-v2-20231113-ctx4096'

# Load the model on GPU in bfloat16 and build the tokenizer pipeline
# (world-model vocabulary).
model = RWKV(model=MODEL_FILE, strategy='cuda bf16')

pipeline = PIPELINE(model, "rwkv_vocab_v20230424")
|
|
|
|
|
def my_qa_generator(ctx,length):
    """Generate up to *length* tokens continuing *ctx* and return the text.

    Uses the module-level ``pipeline`` (tokenizer + model). Sampling is
    low-temperature top-p with a decaying repetition penalty. Generation
    stops early on the end-of-text token (0) or when a double newline
    appears in the decoded output. Only fully-decodable chunks (no
    replacement characters, not ending mid-line) are flushed to the result.
    """
    generated = []      # token ids produced so far
    flushed = 0         # count of tokens already decoded into `answer`
    answer = ''
    penalty = {}        # token id -> repetition weight (decays each step)
    state = None

    for step in range(length):
        # Feed the full prompt on the first step, then one token at a time.
        feed = pipeline.encode(ctx) if step == 0 else [token]
        out, state = pipeline.model.forward(feed, state)

        # Presence + frequency penalty on previously seen tokens.
        for tid in penalty:
            out[tid] -= 0.4 + penalty[tid] * 0.4

        token = pipeline.sample_logits(out, temperature=1.0, top_p=0.2)
        if token == 0:
            break  # end-of-text

        generated.append(token)

        # Decay all penalties, then bump the token just emitted.
        for tid in penalty:
            penalty[tid] *= 0.996
        penalty[token] = penalty.get(token, 0) + 1

        chunk = pipeline.decode(generated[flushed:])
        if ('\ufffd' not in chunk) and (not chunk.endswith('\n')):
            # Clean UTF-8 and not mid-line: flush it.
            answer += chunk
            flushed = step + 1
        elif '\n\n' in chunk:
            # Paragraph break: flush the trailing piece and stop.
            answer += chunk.rstrip()
            break

    return answer.strip()
|
|
|
|
|
def bench():
    """Run the QA benchmark in ``heval_v1.json`` and print a percentage score.

    Each entry is expected to have a ``'question'`` string and an ``'answer'``
    list of acceptable answer strings; a question counts as correct when the
    generated continuation starts with any one of them.
    """
    # `with` ensures the file handle is closed (the original leaked it).
    with open('heval_v1.json', 'r', encoding='utf-8') as f:
        data = json.load(f)

    yes = 0

    for i, q in enumerate(data):
        question = q['question']
        ctx = my_qa_generator(question, 6)

        # BUGFIX: count each question at most once. The original incremented
        # `yes` for every matching answer variant (its `flag` was set but
        # never used), which could over-count when several reference answers
        # share a prefix with the generated text.
        flag = False
        for ans in q['answer']:
            if ctx.startswith(ans):
                flag = True
                break
        if flag:
            yes += 1

        # Running accuracy after i+1 questions.
        print(i, yes, len(data), yes/(i+1))

    print('Score : ', yes/len(data)*100)
|
|
|
|
|
# Entry point: run the benchmark when the script is executed.
bench()
|
|
|
|