Update README.md
Browse files
README.md
CHANGED
@@ -33,21 +33,31 @@ This model can answer information about different explicit ideas in medicine
|
|
33 |
|
34 |
```python
|
35 |
import torch
|
36 |
-
from transformers import
|
37 |
|
38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
|
40 |
-
|
41 |
-
|
42 |
|
43 |
-
|
44 |
-
"""
|
45 |
-
What is Medcine?
|
46 |
-
"""''', return_tensors="pt", return_attention_mask=False)
|
47 |
|
48 |
-
|
49 |
-
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
```
|
52 |
|
53 |
|
|
|
33 |
|
34 |
```python
|
35 |
import torch
|
36 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
37 |
|
38 |
+
base_model_id = "microsoft/phi-2"
|
39 |
+
base_model = AutoModelForCausalLM.from_pretrained(
|
40 |
+
base_model_id, # Phi2, same as before
|
41 |
+
device_map="auto",
|
42 |
+
trust_remote_code=True,
|
43 |
+
load_in_8bit=True,
|
44 |
+
torch_dtype=torch.float16,
|
45 |
+
)
|
46 |
|
47 |
+
eval_tokenizer = AutoTokenizer.from_pretrained(base_model_id, add_bos_token=True, trust_remote_code=True, use_fast=False)
|
48 |
+
eval_tokenizer.pad_token = eval_tokenizer.eos_token
|
49 |
|
50 |
+
from peft import PeftModel
|
|
|
|
|
|
|
51 |
|
52 |
+
adapter_model_id = "segestic/phi2_medical_seg"
|
53 |
+
ft_model = PeftModel.from_pretrained(base_model, adapter_model_id)
|
54 |
+
|
55 |
+
eval_prompt = "What is medicine?"
|
56 |
+
model_input = eval_tokenizer(eval_prompt, return_tensors="pt").to("cuda")
|
57 |
+
|
58 |
+
ft_model.eval()
|
59 |
+
with torch.no_grad():
|
60 |
+
print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=100, repetition_penalty=1.11)[0], skip_special_tokens=True))
|
61 |
```
|
62 |
|
63 |
|