Update README.md
Browse files
README.md
CHANGED
@@ -47,7 +47,7 @@ max_seq_length = 2048
|
|
47 |
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
|
48 |
load_in_4bit = False
|
49 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
50 |
-
model_name = "Xhaheen/Gemma_Urdu_Shaheen_1_epoch",
|
51 |
max_seq_length = max_seq_length,
|
52 |
dtype = dtype,
|
53 |
load_in_4bit = load_in_4bit,
|
@@ -95,7 +95,7 @@ model = AutoPeftModelForCausalLM.from_pretrained(
|
|
95 |
"Xhaheen/Gemma_Urdu_Shaheen_1_epoch",
|
96 |
load_in_4bit = False
|
97 |
)
|
98 |
-
tokenizer = AutoTokenizer.from_pretrained("Xhaheen/Gemma_Urdu_Shaheen_1_epoch")
|
99 |
|
100 |
|
101 |
input_prompt = """
|
|
|
47 |
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
|
48 |
load_in_4bit = False
|
49 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
50 |
+
model_name = "Mollel/Gemma_Swahili_Mollel_1_epoch",
|
51 |
max_seq_length = max_seq_length,
|
52 |
dtype = dtype,
|
53 |
load_in_4bit = load_in_4bit,
|
|
|
95 |
"Xhaheen/Gemma_Urdu_Shaheen_1_epoch",
|
96 |
load_in_4bit = False
|
97 |
)
|
98 |
+
tokenizer = AutoTokenizer.from_pretrained("Mollel/Gemma_Swahili_Mollel_1_epoch")
|
99 |
|
100 |
|
101 |
input_prompt = """
|