devshaheen
committed on
Commit
•
a2c65f6
1
Parent(s):
04f88f9
updated readme
Browse files
README.md
CHANGED
@@ -48,8 +48,8 @@ You can use this fine-tuned model with the Hugging Face `transformers` library.
|
|
48 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
49 |
|
50 |
# Load pre-trained model and tokenizer
|
51 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
52 |
-
model = AutoModelForCausalLM.from_pretrained("
|
53 |
|
54 |
# Example text generation
|
55 |
input_text = "What is the capital of France?"
|
@@ -64,10 +64,3 @@ print(generated_text)
|
|
64 |
|
65 |
|
66 |
|
67 |
-
@misc{llama-2-7b-chat-finetune,
|
68 |
-
author = {Shaheen Nabi},
|
69 |
-
title = {Fine-tuned Llama-2-7B-Chat Model},
|
70 |
-
year = {2024},
|
71 |
-
publisher = {Hugging Face},
|
72 |
-
howpublished = {\url{https://huggingface.co/devshaheen/llama-2-7b-chat-finetune}},
|
73 |
-
}
|
|
|
48 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
49 |
|
50 |
# Load pre-trained model and tokenizer
|
51 |
+
tokenizer = AutoTokenizer.from_pretrained("devshaheen/llama-2-7b-chat-finetune")
|
52 |
+
model = AutoModelForCausalLM.from_pretrained("devshaheen/llama-2-7b-chat-finetune")
|
53 |
|
54 |
# Example text generation
|
55 |
input_text = "What is the capital of France?"
|
|
|
64 |
|
65 |
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|