Update README.md
README.md
CHANGED
```diff
@@ -12,13 +12,13 @@ from optimum.intel.openvino import OVModelForCausalLM
 
 ov_config = {"PERFORMANCE_HINT": "LATENCY", "NUM_STREAMS": "1", "CACHE_DIR": "", "INFERENCE_PRECISION_HINT": "f16"}
 
-tok = AutoTokenizer.from_pretrained("xriminact/llama-3-8b-instruct-openvino", trust_remote_code=True)
+tok = AutoTokenizer.from_pretrained("xriminact/llama-3-8b-instruct-openvino-int4", trust_remote_code=True)
 
 ov_model = OVModelForCausalLM.from_pretrained(
-    "xriminact/llama-3-8b-instruct-openvino",
+    "xriminact/llama-3-8b-instruct-openvino-int4",
     device="GPU",
     ov_config=ov_config,
-    config=AutoConfig.from_pretrained("xriminact/llama-3-8b-instruct-openvino", trust_remote_code=True),
+    config=AutoConfig.from_pretrained("xriminact/llama-3-8b-instruct-openvino-int4", trust_remote_code=True),
     trust_remote_code=True,
 )
```
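For reference, a minimal generation sketch using the tokenizer and model loaded in the snippet above; the prompt and `max_new_tokens` value are illustrative assumptions, not part of the README:

```python
# Minimal usage sketch (assumed example; prompt and settings are illustrative).
inputs = tok("What is OpenVINO?", return_tensors="pt")
outputs = ov_model.generate(**inputs, max_new_tokens=64)
print(tok.decode(outputs[0], skip_special_tokens=True))
```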