Update app.py
app.py CHANGED
@@ -17,8 +17,8 @@ bert_path = "bert-base-uncased"
 tokenizer = AutoTokenizer.from_pretrained(bert_path)
 
 # Read and format data.
-tweets_raw = pd.read_csv("test.csv", nrows=
-labels_raw = pd.read_csv("test_labels.csv", nrows=
+tweets_raw = pd.read_csv("test.csv", nrows=50)
+labels_raw = pd.read_csv("test_labels.csv", nrows=50)
 
 label_set = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
 label_vector = labels_raw[label_set].values.tolist()
@@ -63,7 +63,7 @@ class ToxicityDataset(Dataset):
 
 # Based on user model selection, prepare Dataset and DataLoader
 infer_dataset = ToxicityDataset(tweet_df, tokenizer, MAX_LENGTH)
-infer_params = {"batch_size": INFER_BATCH_SIZE, "shuffle":
+infer_params = {"batch_size": INFER_BATCH_SIZE, "shuffle": False}
 infer_loader = DataLoader(infer_dataset, **infer_params)
 
 class BertClass(torch.nn.Module):
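Both edits tighten the inference path: nrows=50 caps how many rows pandas reads from each CSV, and "shuffle": False keeps the DataLoader's batch order identical to the input row order, so prediction i can be matched back to tweet i. A minimal sketch of the shuffle behavior, using a hypothetical MiniDataset stand-in rather than the app's ToxicityDataset (the batch size of 8 and length of 50 are illustrative, not the app's INFER_BATCH_SIZE):

from torch.utils.data import Dataset, DataLoader

class MiniDataset(Dataset):
    """Toy stand-in for ToxicityDataset: each item is just its own index."""
    def __init__(self, n):
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, idx):
        return idx

loader = DataLoader(MiniDataset(50), batch_size=8, shuffle=False)
for batch in loader:
    print(batch)  # tensor([0..7]), tensor([8..15]), ...: row order preserved

With shuffle=True the same loop would yield indices in a random order, and per-row predictions could no longer be zipped back onto the original dataframe without tracking indices explicitly.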