Fix: update example in readme
I typed bert instead of my model.
README.md CHANGED

@@ -48,7 +48,7 @@ from datasets import load_dataset
 
 # Load IMDb dataset for binary classification
 dataset = load_dataset("imdb")
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("yash3056/Llama-3.2-1B-imdb")
 
 # Tokenize the dataset
 def preprocess(example):
@@ -57,7 +57,7 @@ def preprocess(example):
 tokenized_datasets = dataset.map(preprocess, batched=True)
 
 # Load model for binary classification (num_labels=2)
-model = AutoModelForSequenceClassification.from_pretrained("
+model = AutoModelForSequenceClassification.from_pretrained("yash3056/Llama-3.2-1B-imdb", num_labels=2)
 
 # Training arguments
 training_args = TrainingArguments(
@@ -90,7 +90,7 @@ from datasets import load_dataset
 
 # Load AG News dataset for multi-class classification (4 labels)
 dataset = load_dataset("ag_news")
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("yash3056/Llama-3.2-1B-imdb")
 
 # Tokenize the dataset
 def preprocess(example):
@@ -99,7 +99,7 @@ def preprocess(example):
 tokenized_datasets = dataset.map(preprocess, batched=True)
 
 # Load model for multi-class classification (num_labels=4)
-model = AutoModelForSequenceClassification.from_pretrained("
+model = AutoModelForSequenceClassification.from_pretrained("yash3056/Llama-3.2-1B-imdb", num_labels=4)
 
 # Training arguments
 training_args = TrainingArguments(
@@ -149,8 +149,8 @@ Use the code below to get started with the model.
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
 # Load Model and tokenizers
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForSequenceClassification.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("yash3056/Llama-3.2-1B-imdb")
+model = AutoModelForSequenceClassification.from_pretrained("yash3056/Llama-3.2-1B-imdb", num_labels=n) #n is the number of labels in the code
 ```
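For reference, here is a minimal end-to-end sketch of the corrected binary-classification snippet after this change. The repository id `yash3056/Llama-3.2-1B-imdb` and `num_labels=2` come from the diff above; the `preprocess` body, the pad-token handling, and the `TrainingArguments` values are not shown in the diff and are illustrative assumptions, not the README's exact code.

```python
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# Load IMDb dataset for binary classification (as in the README diff)
dataset = load_dataset("imdb")
tokenizer = AutoTokenizer.from_pretrained("yash3056/Llama-3.2-1B-imdb")

# Llama-family tokenizers may ship without a pad token; reuse EOS if so (assumption)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Tokenize the dataset (the preprocess body is not in the diff; this is one plausible version)
def preprocess(example):
    return tokenizer(
        example["text"], truncation=True, padding="max_length", max_length=512
    )

tokenized_datasets = dataset.map(preprocess, batched=True)

# Load model for binary classification (num_labels=2, as in the diff)
model = AutoModelForSequenceClassification.from_pretrained(
    "yash3056/Llama-3.2-1B-imdb", num_labels=2
)
model.config.pad_token_id = tokenizer.pad_token_id

# Training arguments (placeholder values, not taken from the README)
training_args = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=8,
    num_train_epochs=1,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
)
trainer.train()
```

The AG News hunks follow the same pattern, swapping in `load_dataset("ag_news")` and `num_labels=4`.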