Update README.md
README.md (CHANGED)
````diff
@@ -51,10 +51,8 @@ Use the code below to get started with the model:
 import torch
 from transformers import AutoTokenizer, AutoPeftModelForCausalLM
 
-# Set device
 DEV = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-# Set model and tokenizer path
 adapter_path = "SungJoo/llama2-7b-sft-detox-DPO"
 
 # Load model
@@ -65,7 +63,8 @@ model = AutoPeftModelForCausalLM.from_pretrained(
 
 # Load tokenizer
 tokenizer = AutoTokenizer.from_pretrained(adapter_path)
-
+```
+```python
 # Example usage
 input_text = "Your input text here"
 inputs = tokenizer(input_text, return_tensors="pt").to(DEV)
````
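For context, the snippet below sketches how the two resulting code blocks fit together end to end. It is a minimal sketch, not the README's exact code: the `from_pretrained` arguments and the final generate/decode step are hidden or cut off in the diff and are assumptions here, and `AutoPeftModelForCausalLM` is imported from `peft` (where the class is defined) rather than from `transformers` as in the README's import line.

```python
# Minimal sketch of the usage flow implied by the README diff.
# Assumptions: the from_pretrained keyword arguments and the
# generate/decode step are not visible in the diff and are illustrative only.
import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM  # class is defined in peft, not transformers

DEV = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

adapter_path = "SungJoo/llama2-7b-sft-detox-DPO"

# Load the PEFT adapter together with its Llama-2 base model
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_path,
    torch_dtype=torch.float16,  # assumption: dtype not shown in the diff
).to(DEV)

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(adapter_path)

# Example usage
input_text = "Your input text here"
inputs = tokenizer(input_text, return_tensors="pt").to(DEV)

# Assumption: the generation step is cut off in the diff; a typical call is shown
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Splitting the README into two fenced blocks, as this commit does, keeps the model/tokenizer setup separate from the example usage while leaving the code runnable when pasted together in order.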