zhiyuan8 committed (verified)
Commit fc86cd0 · 1 Parent(s): 4ffd31d

Update README.md

Files changed (1): README.md (+27 -1)

README.md CHANGED
@@ -63,6 +63,32 @@ You can use this model just as any other HuggingFace models:
 from transformers import AutoModelForCausalLM, AutoTokenizer
 model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-1.5B-world', trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-1.5B-world', trust_remote_code=True)
+
+model = model.cuda()
+prompt = "What is a large language model?"
+messages = [
+    {"role": "user", "content": "Who are you?"},
+    {"role": "assistant", "content": "I am a GPT-3 based model."},
+    {"role": "user", "content": prompt}
+]
+text = tokenizer.apply_chat_template(
+    messages,
+    tokenize=False,
+    add_generation_prompt=True
+)
+
+model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+generated_ids = model.generate(
+    **model_inputs,
+    max_new_tokens=1024,
+)
+generated_ids = [
+    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+]
+
+response = tokenizer.batch_decode(generated_ids, skip_special_tokens=False)[0]
+print(response)
 ```
 
 ## Training Details
@@ -85,7 +111,7 @@ This model is trained on the World v3 with a total of 3.119 trillion tokens.
 
 before conversion: ppl 4.13 acc 69.4%
 
-after conversion: ppl 4.26 acc 68.8%
+after conversion: ppl 4.26 acc 68.8% (without applying the chat template)
 
 ## FAQ
 Q: safetensors metadata is none.
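
The before/after-conversion perplexity and accuracy figures in the diff above are reported without the evaluation script or corpus. Purely as an illustrative sketch, and not the evaluation actually used to produce those numbers, a token-level perplexity check on the converted Hugging Face checkpoint could look like the following, assuming the remote-code model follows the standard convention of returning a cross-entropy `loss` when `labels` are supplied; the sample text is a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the converted HF checkpoint (same identifiers as in the README above).
model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-1.5B-world', trust_remote_code=True).cuda()
tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-1.5B-world', trust_remote_code=True)

# Placeholder text; the corpus behind the reported ppl/acc is not specified here.
text = "A large language model is a neural network trained to predict the next token."
inputs = tokenizer(text, return_tensors="pt").to(model.device)

with torch.no_grad():
    # With labels set to the input ids, standard HF causal LMs return the mean
    # next-token cross-entropy; exponentiating it gives perplexity.
    outputs = model(**inputs, labels=inputs["input_ids"])

print(f"perplexity: {torch.exp(outputs.loss).item():.2f}")
```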