Update README.md
README.md
CHANGED
@@ -62,7 +62,7 @@ We would like to take this opportunity to thank
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-model_id = "Local-Novel-LLM-project/Vecteus-
+model_id = "Local-Novel-LLM-project/Vecteus-Poet"
 new_tokens = 1024
 
 model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto")
@@ -79,14 +79,7 @@ generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sam
 print(tokenizer.batch_decode(generated_ids)[0])
 ````
 
-
-
-
-- VT0.1 = Ninjav1 + Original Lora
-- VT0.2 = Ninjav1 128k + Original Lora
-- VT0.2on0.1 = VT0.1 + VT0.2
-
-- VT1 = all VT Series + Lora + Ninja 128k and Normal
+
 
 ## Other points to keep in mind
 - The training data may be biased. Be careful with the generated sentences.
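The diff only shows the changed hunks, so the middle of the README's usage snippet (tokenizer loading, prompt construction, and the full `generate` call) is not visible here. Below is a minimal, self-contained sketch of how the updated snippet might look end to end. The prompt text, the tokenizer call, and `do_sample=True` are assumptions filled in for illustration; only `model_id`, `new_tokens`, the `from_pretrained` arguments, and the `generate`/`batch_decode` lines appear in the diff itself.

```python
# Minimal sketch of the updated README snippet (assumptions marked below).
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_id = "Local-Novel-LLM-project/Vecteus-Poet"
new_tokens = 1024

# Tokenizer loading is assumed; it is not shown in the visible hunks.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",  # requires the flash-attn package and a supported GPU
    device_map="auto",
)

prompt = "Write a short poem about the sea."  # placeholder prompt (assumption)
model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# The generate call is truncated in the hunk header; sampling is assumed here.
generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
print(tokenizer.batch_decode(generated_ids)[0])
```

Running this sketch requires a CUDA-capable GPU for the float16 weights and flash attention; on CPU-only machines, dropping the `attn_implementation` argument and using the default dtype would be the usual fallback.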