Update README.md
README.md
```diff
@@ -10,18 +10,9 @@ language:
 from transformers import AutoConfig, AutoTokenizer
 from optimum.intel.openvino import OVModelForCausalLM
 
-if model_to_run.value == "INT4":
-    model_dir = int4_model_dir
-elif model_to_run.value == "INT8":
-    model_dir = int8_model_dir
-else:
-    model_dir = fp16_model_dir
-print(f"Loading model from {model_dir}")
-
 ov_config = {"PERFORMANCE_HINT": "LATENCY", "NUM_STREAMS": "1", "CACHE_DIR": "", "INFERENCE_PRECISION_HINT": "f16"}
 
-
-tok = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
+tok = AutoTokenizer.from_pretrained("xriminact/llama-3-8b-instruct-openvino", trust_remote_code=True)
 
 ov_model = OVModelForCausalLM.from_pretrained(
     "xriminact/llama-3-8b-instruct-openvino",
```
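For context, here is a minimal sketch of how the updated README snippet reads end to end after this change. The `from_pretrained(...)` call is truncated in the hunk, so the `device`, `ov_config`, and `config` arguments and the generation test below are assumed completions typical for optimum-intel, not taken from the commit:

```python
from transformers import AutoConfig, AutoTokenizer
from optimum.intel.openvino import OVModelForCausalLM

model_id = "xriminact/llama-3-8b-instruct-openvino"

# OpenVINO runtime options, as in the README snippet.
ov_config = {"PERFORMANCE_HINT": "LATENCY", "NUM_STREAMS": "1", "CACHE_DIR": "", "INFERENCE_PRECISION_HINT": "f16"}

# After the change, the tokenizer is pulled from the hub repo directly
# instead of a locally selected model_dir.
tok = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

# Assumed completion of the truncated call: these keyword arguments are
# standard for OVModelForCausalLM but are not shown in the diff.
ov_model = OVModelForCausalLM.from_pretrained(
    model_id,
    device="CPU",
    ov_config=ov_config,
    config=AutoConfig.from_pretrained(model_id, trust_remote_code=True),
    trust_remote_code=True,
)

# Assumed usage: a quick generation check.
inputs = tok("What is OpenVINO?", return_tensors="pt")
outputs = ov_model.generate(**inputs, max_new_tokens=64)
print(tok.decode(outputs[0], skip_special_tokens=True))
```

The net effect of the commit is to drop the INT4/INT8/FP16 `model_dir` selection logic, which referenced variables (`model_to_run`, `int4_model_dir`, etc.) not defined in the README, and load both tokenizer and model straight from the hub repo.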