updated README.md
README.md CHANGED
@@ -2,7 +2,7 @@
 license: other
 base_model: meta-llama/Meta-Llama-3-70B-Instruct
 model-index:
-- name: Llama3-70b-4bit
+- name: Llama3-70b-Instruct-4bit
   results:
   - task:
       name: Text Generation
@@ -18,7 +18,7 @@ tags:
 ---
 
 
-# Llama3-70b-4bit
+# Llama3-70b-Instruct-4bit
 
 This model is a quantized version of [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)
 
@@ -49,7 +49,7 @@ Run the following command in the terminal/jupyter_notebook:
 >>> import torch
 
 >>> # Load model and tokenizer
->>> model_id = "screevoai/llama3-70b-4bit"
+>>> model_id = "screevoai/llama3-70b-instruct-4bit"
 >>> tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 >>> model = AutoModelForCausalLM.from_pretrained(
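For context on the renamed `model_id`, here is a minimal sketch of the full load-and-generate flow that the truncated `from_pretrained(` call in the last hunk belongs to. The `device_map`, dtype, chat prompt, and generation settings are illustrative assumptions, not values taken from this README.

```python
# Minimal sketch of the usage the README snippet implies; device_map, dtype,
# and generation settings are assumptions, not taken from the README itself.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "screevoai/llama3-70b-instruct-4bit"

# Load the tokenizer and the pre-quantized 4-bit checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption: compute dtype for non-quantized layers
    device_map="auto",           # assumption: place layers across available GPUs
)

# Build a chat prompt with the model's Instruct template and generate.
messages = [{"role": "user", "content": "Explain 4-bit quantization in one sentence."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```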