George-Ogden committed on
Commit
c1eb6fa
1 Parent(s): 98c3e35

Create README.md

Files changed (1): README.md (+57, -0)
README.md ADDED
@@ -0,0 +1,57 @@
---
license: mit
datasets:
- bookcorpus
- wikipedia
language:
- en
metrics:
- glue
pipeline_tag: text-classification
---
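The metadata above declares `pipeline_tag: text-classification`, so the model can also be loaded through the standard `pipeline` API for a quick single-pair check. The snippet below is a minimal sketch and not part of the original README; the premise/hypothesis pair is purely illustrative.

```python
from transformers import pipeline

# Quick sanity check via the text-classification pipeline
# (illustrative example, not from the original README).
classifier = pipeline(
    "text-classification",
    model="George-Ogden/roberta-base-cased-finetuned-mnli",
)

# MNLI scores a premise/hypothesis pair; pass them as text/text_pair.
print(classifier({
    "text": "A man is playing a guitar.",
    "text_pair": "A person is making music.",
}))
```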
Evaluate on MNLI (matched and mismatched validation sets):
```python
from transformers import (
    default_data_collator,
    AutoTokenizer,
    AutoModelForSequenceClassification,
    Trainer,
)
from datasets import load_dataset

import functools

# Task-specific helpers: tokenization and GLUE metric computation
# (see the sketch below this block).
from utils import compute_metrics, preprocess_function

model_name = "George-Ogden/roberta-base-cased-finetuned-mnli"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = Trainer(
    model=model,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
    data_collator=default_data_collator,
)

# Tokenize both validation splits of GLUE/MNLI.
raw_datasets = load_dataset(
    "glue",
    "mnli",
).map(functools.partial(preprocess_function, tokenizer), batched=True)

tasks = ["mnli", "mnli-mm"]
eval_datasets = [
    raw_datasets["validation_matched"],
    raw_datasets["validation_mismatched"],
]

# Evaluate on the matched and mismatched sets and log the metrics for each.
for eval_dataset, task in zip(eval_datasets, tasks):
    metrics = trainer.evaluate(eval_dataset=eval_dataset)
    metrics["eval_samples"] = len(eval_dataset)

    if task == "mnli-mm":
        metrics = {k + "_mm": v for k, v in metrics.items()}

    trainer.log_metrics("eval", metrics)
```
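The script imports `preprocess_function` and `compute_metrics` from a local `utils` module that is not shown in this README. A minimal sketch of what such a module could contain is below; the column names, maximum sequence length, and the GLUE metric loader are assumptions based on the standard MNLI setup, not the repository's actual code.

```python
# utils.py -- hypothetical helpers for the evaluation script above.
import numpy as np
import evaluate

# GLUE's MNLI metric is accuracy.
metric = evaluate.load("glue", "mnli")


def preprocess_function(tokenizer, examples):
    # Tokenize premise/hypothesis pairs; pad to a fixed length so that
    # default_data_collator can batch them without further padding.
    return tokenizer(
        examples["premise"],
        examples["hypothesis"],
        truncation=True,
        padding="max_length",
        max_length=128,
    )


def compute_metrics(eval_pred):
    # Convert logits to class predictions and score them with the GLUE metric.
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
```

With padding handled inside `preprocess_function`, `default_data_collator` only has to stack already fixed-length features, which matches the collator chosen in the evaluation snippet above.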