Commit 03f91f4 · Parent(s): f72b113
tokenizer from my own model
app.py CHANGED
@@ -12,7 +12,7 @@ from torch.utils.data import TensorDataset, random_split
 from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
 from transformers import BertForSequenceClassification, AdamW, BertConfig
 import random
-tokenizer = AutoTokenizer.from_pretrained('
+tokenizer = AutoTokenizer.from_pretrained('armansakif/bengali-fake-news')

 model = BertForSequenceClassification.from_pretrained(
     "armansakif/bengali-fake-news", # Use the 12-layer BERT model, with an uncased vocab.
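
Below is a minimal usage sketch, not part of the commit, showing how the tokenizer and model loaded in app.py would typically be used together for inference. It assumes AutoTokenizer is imported from transformers (the import is not visible in this hunk) and that torch is installed; the example input string is illustrative only.

# Sketch only: load the tokenizer and classifier from the same Hub repo
# and run a single forward pass to get a predicted class index.
import torch
from transformers import AutoTokenizer, BertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained('armansakif/bengali-fake-news')
model = BertForSequenceClassification.from_pretrained('armansakif/bengali-fake-news')
model.eval()

# Tokenize an example headline (illustrative input) and compute class logits.
inputs = tokenizer("উদাহরণ সংবাদ শিরোনাম", return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
predicted_class = logits.argmax(dim=-1).item()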