vermouthdky committed

Commit d1a37ea
1 Parent(s): 6c73b3d

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,204 @@
+ ---
+ library_name: peft
+ base_model: baichuan-inc/Baichuan2-7B-Base
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
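+
+ A minimal sketch for loading this LoRA adapter on top of the base model with PEFT (the adapter id below is a placeholder, since this card does not state the repository id; `trust_remote_code=True` is needed because Baichuan2 ships custom modeling code):
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ base_id = "baichuan-inc/Baichuan2-7B-Base"
+ adapter_id = "path/or/hub-id/of-this-adapter"  # placeholder: not stated in this card
+
+ tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
+ base = AutoModelForCausalLM.from_pretrained(
+     base_id, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
+ )
+ model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA weights
+ model.eval()
+
+ inputs = tokenizer("Hello, world.", return_tensors="pt").to(model.device)
+ output = model.generate(**inputs, max_new_tokens=32)
+ print(tokenizer.decode(output[0], skip_special_tokens=True))
+ ```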
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+
+ ### Framework versions
+
+ - PEFT 0.7.2.dev0
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "baichuan-inc/Baichuan2-7B-Base",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "up_proj",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": true
+ }
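The config above fully determines the adapter: rank-16 rsLoRA on the MLP projections (`gate_proj`, `up_proj`, `down_proj`) of Baichuan2-7B-Base. A minimal sketch of recreating the same setup with PEFT, for retraining rather than loading the saved adapter (fields not listed in the JSON stay at PEFT defaults):

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Mirrors adapter_config.json: r=16, lora_alpha=16, dropout 0.05, rsLoRA.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
    use_rslora=True,  # rsLoRA scales updates by lora_alpha / sqrt(r) instead of lora_alpha / r
)

base = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Base", trust_remote_code=True
)
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable; the 7B base stays frozen
```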
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c9e9b4570f31e6c3f1d520bb7c39a3f97c1647fe4c15dfa7f98dccebc5f2924
+ size 92824216
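The three lines above are a Git LFS pointer rather than the weights themselves: the actual safetensors blob lives in LFS storage and is identified by its SHA-256 and byte size. A small sketch for verifying a downloaded copy against the pointer (the local filename is an assumption):

```python
import hashlib
import os

def lfs_sha256(path: str, chunk: int = 1 << 20) -> str:
    """sha256 of the file contents, which Git LFS records as the object id."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

path = "adapter_model.safetensors"  # assumed local download location
assert os.path.getsize(path) == 92824216
assert lfs_sha256(path) == "0c9e9b4570f31e6c3f1d520bb7c39a3f97c1647fe4c15dfa7f98dccebc5f2924"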
checkpoint-2000/README.md ADDED
@@ -0,0 +1,204 @@
Contents identical to README.md above.
checkpoint-2000/adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "baichuan-inc/Baichuan2-7B-Base",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "gate_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": true
+ }
checkpoint-2000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:682a8a19353ac74e98288e884dd698af48654719dd9e6447df4372ad4c55dcea
+ size 92867978
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c3f38367f945e2038f511d3149d7b975eac58d009bb5164d599440b0c27d429
+ size 185759930
checkpoint-2000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a6cc6f29317960f4f789cae85c1086093031a1aa9381891227daf709ddb3c31
+ size 15024
checkpoint-2000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d78f874d28b308923e01be1a6ee74f612ace5e5b32983243774e93c1b394827a
+ size 15024
checkpoint-2000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:621759f38389d20c86b422cc6a09a0e3a378093f51c9f5a0fcb1566e54dd5d83
+ size 15024
checkpoint-2000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15860462061a5fb1486aabf635d2db5d8bd20bbdb4cd37590590ad9225ff353b
+ size 15024
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15d039c7229079a685d5ddeec6053e0754825a863b79bce349eb14e690a466c8
+ size 1064
checkpoint-2000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c16c186c4767468129937e947797dba84e00252f3a99686516f4e612d21bc85c
+ size 4536
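The `checkpoint-2000/` directory follows the standard Hugging Face `Trainer` checkpoint layout: `optimizer.pt` and `scheduler.pt` hold optimizer and LR-scheduler state, `rng_state_0.pth` through `rng_state_3.pth` hold per-process RNG state from the four training processes, and `trainer_state.json`/`training_args.bin` record progress and the original arguments. A minimal sketch of inspecting and resuming from it (the resume lines are commented out because the model and dataset must first be rebuilt as in the original run):

```python
import torch

# training_args.bin is the pickled TrainingArguments object saved by the Trainer.
args = torch.load("checkpoint-2000/training_args.bin")
print(args)

# With `model` and `train_dataset` rebuilt as in the original run, training resumes with:
# trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
# trainer.train(resume_from_checkpoint="checkpoint-2000")  # restores optimizer, scheduler, RNG
```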
checkpoint-3000/README.md ADDED
@@ -0,0 +1,204 @@
Contents identical to README.md above.
checkpoint-3000/adapter_config.json ADDED
@@ -0,0 +1,28 @@
Contents identical to checkpoint-2000/adapter_config.json above.
checkpoint-3000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03ee8dece7caec865a860631b035ea997d052f8274d47b2b834478a65024b356
+ size 92867978
checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07d90686cba867ae74cb24bf168b2c17abf569a1e7385142a5ca63265d085158
+ size 185759930
checkpoint-3000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8cff3b4ffde83513ea14a1917bb01a89e398bb931a12679f8b5f0d4271d32b5
+ size 15024
checkpoint-3000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d451235a472621ebfe0540662de74a8d0005b87c923fa66909696168dcd15e5
+ size 15024
checkpoint-3000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db20585c6144398f54a7d7b5db6196f66204eb7522f97919a397260b7f6d468f
+ size 15024
checkpoint-3000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f94b0def06a5b53079b067d146b2181778eb9fe867bb948139d35216875cc311
+ size 15024
checkpoint-3000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a6245a209d386d050bac6188eb2e2fef63839b8a2677e64a04f0e7c6c8a219d
+ size 1064
checkpoint-3000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c16c186c4767468129937e947797dba84e00252f3a99686516f4e612d21bc85c
+ size 4536
llm-harness/results.json ADDED
@@ -0,0 +1,2592 @@
+ {
+   "results": {
+     "mmlu": {"acc,none": 0.4811992593647629, "acc_stderr,none": 0.11827476804960772, "alias": "mmlu"},
+     "mmlu_humanities": {"alias": " - humanities", "acc,none": 0.45504782146652495, "acc_stderr,none": 0.12864316445574026},
+     "mmlu_formal_logic": {"alias": " - formal_logic", "acc,none": 0.31746031746031744, "acc_stderr,none": 0.04163453031302859},
+     "mmlu_high_school_european_history": {"alias": " - high_school_european_history", "acc,none": 0.6424242424242425, "acc_stderr,none": 0.037425970438065864},
+     "mmlu_high_school_us_history": {"alias": " - high_school_us_history", "acc,none": 0.6372549019607843, "acc_stderr,none": 0.03374499356319355},
+     "mmlu_high_school_world_history": {"alias": " - high_school_world_history", "acc,none": 0.679324894514768, "acc_stderr,none": 0.030381931949990417},
+     "mmlu_international_law": {"alias": " - international_law", "acc,none": 0.6694214876033058, "acc_stderr,none": 0.04294340845212094},
+     "mmlu_jurisprudence": {"alias": " - jurisprudence", "acc,none": 0.6018518518518519, "acc_stderr,none": 0.04732332615978814},
+     "mmlu_logical_fallacies": {"alias": " - logical_fallacies", "acc,none": 0.558282208588957, "acc_stderr,none": 0.03901591825836184},
+     "mmlu_moral_disputes": {"alias": " - moral_disputes", "acc,none": 0.5491329479768786, "acc_stderr,none": 0.026788811931562753},
+     "mmlu_moral_scenarios": {"alias": " - moral_scenarios", "acc,none": 0.2424581005586592, "acc_stderr,none": 0.014333522059217887},
+     "mmlu_philosophy": {"alias": " - philosophy", "acc,none": 0.5337620578778135, "acc_stderr,none": 0.028333277109562786},
+     "mmlu_prehistory": {"alias": " - prehistory", "acc,none": 0.558641975308642, "acc_stderr,none": 0.027628737155668777},
+     "mmlu_professional_law": {"alias": " - professional_law", "acc,none": 0.39048239895697523, "acc_stderr,none": 0.012460135913945073},
+     "mmlu_world_religions": {"alias": " - world_religions", "acc,none": 0.6666666666666666, "acc_stderr,none": 0.036155076303109344},
+     "mmlu_other": {"alias": " - other", "acc,none": 0.5526231091084648, "acc_stderr,none": 0.09630130905093312},
+     "mmlu_business_ethics": {"alias": " - business_ethics", "acc,none": 0.57, "acc_stderr,none": 0.04975698519562427},
+     "mmlu_clinical_knowledge": {"alias": " - clinical_knowledge", "acc,none": 0.5207547169811321, "acc_stderr,none": 0.030746349975723463},
+     "mmlu_college_medicine": {"alias": " - college_medicine", "acc,none": 0.4624277456647399, "acc_stderr,none": 0.0380168510452446},
+     "mmlu_global_facts": {"alias": " - global_facts", "acc,none": 0.36, "acc_stderr,none": 0.04824181513244218},
+     "mmlu_human_aging": {"alias": " - human_aging", "acc,none": 0.5919282511210763, "acc_stderr,none": 0.03298574607842822},
+     "mmlu_management": {"alias": " - management", "acc,none": 0.6699029126213593, "acc_stderr,none": 0.0465614711001235},
+     "mmlu_marketing": {"alias": " - marketing", "acc,none": 0.7008547008547008, "acc_stderr,none": 0.029996951858349486},
+     "mmlu_medical_genetics": {"alias": " - medical_genetics", "acc,none": 0.52, "acc_stderr,none": 0.05021167315686779},
+     "mmlu_miscellaneous": {"alias": " - miscellaneous", "acc,none": 0.6807151979565773, "acc_stderr,none": 0.016671261749538722},
+     "mmlu_nutrition": {"alias": " - nutrition", "acc,none": 0.5261437908496732, "acc_stderr,none": 0.028590752958852394},
+     "mmlu_professional_accounting": {"alias": " - professional_accounting", "acc,none": 0.36879432624113473, "acc_stderr,none": 0.028782227561347247},
+     "mmlu_professional_medicine": {"alias": " - professional_medicine", "acc,none": 0.4411764705882353, "acc_stderr,none": 0.030161911930767105},
+     "mmlu_virology": {"alias": " - virology", "acc,none": 0.42771084337349397, "acc_stderr,none": 0.038515976837185335},
+     "mmlu_social_sciences": {"alias": " - social_sciences", "acc,none": 0.5407864803379916, "acc_stderr,none": 0.09279588633835714},
+     "mmlu_econometrics": {"alias": " - econometrics", "acc,none": 0.2894736842105263, "acc_stderr,none": 0.04266339443159394},
+     "mmlu_high_school_geography": {"alias": " - high_school_geography", "acc,none": 0.5909090909090909, "acc_stderr,none": 0.035029757994130065},
+     "mmlu_high_school_government_and_politics": {"alias": " - high_school_government_and_politics", "acc,none": 0.6787564766839378, "acc_stderr,none": 0.033699508685490674},
+     "mmlu_high_school_macroeconomics": {"alias": " - high_school_macroeconomics", "acc,none": 0.4282051282051282, "acc_stderr,none": 0.02508830145469483},
+     "mmlu_high_school_microeconomics": {"alias": " - high_school_microeconomics", "acc,none": 0.42436974789915966, "acc_stderr,none": 0.032104790510157764},
+     "mmlu_high_school_psychology": {"alias": " - high_school_psychology", "acc,none": 0.6330275229357798, "acc_stderr,none": 0.020664675659520532},
+     "mmlu_human_sexuality": {"alias": " - human_sexuality", "acc,none": 0.6259541984732825, "acc_stderr,none": 0.04243869242230523},
+     "mmlu_professional_psychology": {"alias": " - professional_psychology", "acc,none": 0.49019607843137253, "acc_stderr,none": 0.0202239460050743},
+     "mmlu_public_relations": {"alias": " - public_relations", "acc,none": 0.5636363636363636, "acc_stderr,none": 0.04750185058907296},
+     "mmlu_security_studies": {"alias": " - security_studies", "acc,none": 0.4816326530612245, "acc_stderr,none": 0.03198761546763127},
+     "mmlu_sociology": {"alias": " - sociology", "acc,none": 0.681592039800995, "acc_stderr,none": 0.03294118479054095},
+     "mmlu_us_foreign_policy": {"alias": " - us_foreign_policy", "acc,none": 0.71, "acc_stderr,none": 0.04560480215720684},
+     "mmlu_stem": {"alias": " - stem", "acc,none": 0.3916904535363146, "acc_stderr,none": 0.10496079468282744},
+     "mmlu_abstract_algebra": {"alias": " - abstract_algebra", "acc,none": 0.29, "acc_stderr,none": 0.04560480215720683},
+     "mmlu_anatomy": {"alias": " - anatomy", "acc,none": 0.4666666666666667, "acc_stderr,none": 0.043097329010363554},
+     "mmlu_astronomy": {"alias": " - astronomy", "acc,none": 0.48026315789473684, "acc_stderr,none": 0.04065771002562605},
+     "mmlu_college_biology": {"alias": " - college_biology", "acc,none": 0.4722222222222222, "acc_stderr,none": 0.04174752578923185},
+     "mmlu_college_chemistry": {"alias": " - college_chemistry", "acc,none": 0.3, "acc_stderr,none": 0.046056618647183814},
+     "mmlu_college_computer_science": {"alias": " - college_computer_science", "acc,none": 0.42, "acc_stderr,none": 0.049604496374885836},
+     "mmlu_college_mathematics": {"alias": " - college_mathematics", "acc,none": 0.32, "acc_stderr,none": 0.046882617226215034},
+     "mmlu_college_physics": {"alias": " - college_physics", "acc,none": 0.22549019607843138, "acc_stderr,none": 0.04158307533083286},
+     "mmlu_computer_security": {"alias": " - computer_security", "acc,none": 0.59, "acc_stderr,none": 0.04943110704237102},
+     "mmlu_conceptual_physics": {"alias": " - conceptual_physics", "acc,none": 0.40425531914893614, "acc_stderr,none": 0.032081157507886836},
+     "mmlu_electrical_engineering": {"alias": " - electrical_engineering", "acc,none": 0.5655172413793104, "acc_stderr,none": 0.04130740879555497},
+     "mmlu_elementary_mathematics": {"alias": " - elementary_mathematics", "acc,none": 0.2857142857142857, "acc_stderr,none": 0.023266512213730564},
+     "mmlu_high_school_biology": {"alias": " - high_school_biology", "acc,none": 0.5387096774193548, "acc_stderr,none": 0.028358634859836935},
+     "mmlu_high_school_chemistry": {"alias": " - high_school_chemistry", "acc,none": 0.4187192118226601, "acc_stderr,none": 0.03471192860518468},
+     "mmlu_high_school_computer_science": {"alias": " - high_school_computer_science", "acc,none": 0.51, "acc_stderr,none": 0.05024183937956913},
+     "mmlu_high_school_mathematics": {"alias": " - high_school_mathematics", "acc,none": 0.24814814814814815, "acc_stderr,none": 0.026335739404055803},
+     "mmlu_high_school_physics": {"alias": " - high_school_physics", "acc,none": 0.31125827814569534, "acc_stderr,none": 0.03780445850526732},
+     "mmlu_high_school_statistics": {"alias": " - high_school_statistics", "acc,none": 0.30092592592592593, "acc_stderr,none": 0.03128039084329882},
+     "mmlu_machine_learning": {"alias": " - machine_learning", "acc,none": 0.4375, "acc_stderr,none": 0.04708567521880525}
+   },
+   "groups": {
+     "mmlu": {"acc,none": 0.4811992593647629, "acc_stderr,none": 0.11827476804960772, "alias": "mmlu"},
+     "mmlu_humanities": {"alias": " - humanities", "acc,none": 0.45504782146652495, "acc_stderr,none": 0.12864316445574026},
+     "mmlu_other": {"alias": " - other", "acc,none": 0.5526231091084648, "acc_stderr,none": 0.09630130905093312},
+     "mmlu_social_sciences": {"alias": " - social_sciences", "acc,none": 0.5407864803379916, "acc_stderr,none": 0.09279588633835714},
+     "mmlu_stem": {"alias": " - stem", "acc,none": 0.3916904535363146, "acc_stderr,none": 0.10496079468282744}
+   },
+   "configs": {
+     "mmlu_abstract_algebra": {
+       "task": "mmlu_abstract_algebra",
+       "task_alias": "abstract_algebra",
+       "group": "mmlu_stem",
+       "group_alias": "stem",
+       "dataset_path": "hails/mmlu_no_train",
+       "dataset_name": "abstract_algebra",
+       "test_split": "test",
+       "fewshot_split": "dev",
+       "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+       "doc_to_target": "answer",
+       "doc_to_choice": ["A", "B", "C", "D"],
+       "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "fewshot_config": {"sampler": "first_n"},
+       "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {"version": 0.0}
+     },
The remaining "configs" entries (mmlu_anatomy, mmlu_astronomy, mmlu_business_ethics, mmlu_clinical_knowledge, mmlu_college_biology, mmlu_college_chemistry, mmlu_college_computer_science, mmlu_college_mathematics, mmlu_college_medicine, mmlu_college_physics, mmlu_computer_security, mmlu_conceptual_physics, mmlu_econometrics, mmlu_electrical_engineering, mmlu_elementary_mathematics, mmlu_formal_logic, mmlu_global_facts, ...) repeat this block verbatim, varying only the task, task_alias, group, group_alias, dataset_name, and description fields. The rendered diff is truncated partway through the mmlu_global_facts entry.
975
+ "group_alias": "other",
976
+ "dataset_path": "hails/mmlu_no_train",
977
+ "dataset_name": "global_facts",
978
+ "test_split": "test",
979
+ "fewshot_split": "dev",
980
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
981
+ "doc_to_target": "answer",
982
+ "doc_to_choice": [
983
+ "A",
984
+ "B",
985
+ "C",
986
+ "D"
987
+ ],
988
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
989
+ "target_delimiter": " ",
990
+ "fewshot_delimiter": "\n\n",
991
+ "fewshot_config": {
992
+ "sampler": "first_n"
993
+ },
994
+ "metric_list": [
995
+ {
996
+ "metric": "acc",
997
+ "aggregation": "mean",
998
+ "higher_is_better": true
999
+ }
1000
+ ],
1001
+ "output_type": "multiple_choice",
1002
+ "repeats": 1,
1003
+ "should_decontaminate": false,
1004
+ "metadata": {
1005
+ "version": 0.0
1006
+ }
1007
+ },
1008
+ "mmlu_high_school_biology": {
1009
+ "task": "mmlu_high_school_biology",
1010
+ "task_alias": "high_school_biology",
1011
+ "group": "mmlu_stem",
1012
+ "group_alias": "stem",
1013
+ "dataset_path": "hails/mmlu_no_train",
1014
+ "dataset_name": "high_school_biology",
1015
+ "test_split": "test",
1016
+ "fewshot_split": "dev",
1017
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1018
+ "doc_to_target": "answer",
1019
+ "doc_to_choice": [
1020
+ "A",
1021
+ "B",
1022
+ "C",
1023
+ "D"
1024
+ ],
1025
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
1026
+ "target_delimiter": " ",
1027
+ "fewshot_delimiter": "\n\n",
1028
+ "fewshot_config": {
1029
+ "sampler": "first_n"
1030
+ },
1031
+ "metric_list": [
1032
+ {
1033
+ "metric": "acc",
1034
+ "aggregation": "mean",
1035
+ "higher_is_better": true
1036
+ }
1037
+ ],
1038
+ "output_type": "multiple_choice",
1039
+ "repeats": 1,
1040
+ "should_decontaminate": false,
1041
+ "metadata": {
1042
+ "version": 0.0
1043
+ }
1044
+ },
1045
+ "mmlu_high_school_chemistry": {
1046
+ "task": "mmlu_high_school_chemistry",
1047
+ "task_alias": "high_school_chemistry",
1048
+ "group": "mmlu_stem",
1049
+ "group_alias": "stem",
1050
+ "dataset_path": "hails/mmlu_no_train",
1051
+ "dataset_name": "high_school_chemistry",
1052
+ "test_split": "test",
1053
+ "fewshot_split": "dev",
1054
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1055
+ "doc_to_target": "answer",
1056
+ "doc_to_choice": [
1057
+ "A",
1058
+ "B",
1059
+ "C",
1060
+ "D"
1061
+ ],
1062
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
1063
+ "target_delimiter": " ",
1064
+ "fewshot_delimiter": "\n\n",
1065
+ "fewshot_config": {
1066
+ "sampler": "first_n"
1067
+ },
1068
+ "metric_list": [
1069
+ {
1070
+ "metric": "acc",
1071
+ "aggregation": "mean",
1072
+ "higher_is_better": true
1073
+ }
1074
+ ],
1075
+ "output_type": "multiple_choice",
1076
+ "repeats": 1,
1077
+ "should_decontaminate": false,
1078
+ "metadata": {
1079
+ "version": 0.0
1080
+ }
1081
+ },
1082
+ "mmlu_high_school_computer_science": {
1083
+ "task": "mmlu_high_school_computer_science",
1084
+ "task_alias": "high_school_computer_science",
1085
+ "group": "mmlu_stem",
1086
+ "group_alias": "stem",
1087
+ "dataset_path": "hails/mmlu_no_train",
1088
+ "dataset_name": "high_school_computer_science",
1089
+ "test_split": "test",
1090
+ "fewshot_split": "dev",
1091
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1092
+ "doc_to_target": "answer",
1093
+ "doc_to_choice": [
1094
+ "A",
1095
+ "B",
1096
+ "C",
1097
+ "D"
1098
+ ],
1099
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
1100
+ "target_delimiter": " ",
1101
+ "fewshot_delimiter": "\n\n",
1102
+ "fewshot_config": {
1103
+ "sampler": "first_n"
1104
+ },
1105
+ "metric_list": [
1106
+ {
1107
+ "metric": "acc",
1108
+ "aggregation": "mean",
1109
+ "higher_is_better": true
1110
+ }
1111
+ ],
1112
+ "output_type": "multiple_choice",
1113
+ "repeats": 1,
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 0.0
1117
+ }
1118
+ },
1119
+ "mmlu_high_school_european_history": {
1120
+ "task": "mmlu_high_school_european_history",
1121
+ "task_alias": "high_school_european_history",
1122
+ "group": "mmlu_humanities",
1123
+ "group_alias": "humanities",
1124
+ "dataset_path": "hails/mmlu_no_train",
1125
+ "dataset_name": "high_school_european_history",
1126
+ "test_split": "test",
1127
+ "fewshot_split": "dev",
1128
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1129
+ "doc_to_target": "answer",
1130
+ "doc_to_choice": [
1131
+ "A",
1132
+ "B",
1133
+ "C",
1134
+ "D"
1135
+ ],
1136
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
1137
+ "target_delimiter": " ",
1138
+ "fewshot_delimiter": "\n\n",
1139
+ "fewshot_config": {
1140
+ "sampler": "first_n"
1141
+ },
1142
+ "metric_list": [
1143
+ {
1144
+ "metric": "acc",
1145
+ "aggregation": "mean",
1146
+ "higher_is_better": true
1147
+ }
1148
+ ],
1149
+ "output_type": "multiple_choice",
1150
+ "repeats": 1,
1151
+ "should_decontaminate": false,
1152
+ "metadata": {
1153
+ "version": 0.0
1154
+ }
1155
+ },
1156
+ "mmlu_high_school_geography": {
1157
+ "task": "mmlu_high_school_geography",
1158
+ "task_alias": "high_school_geography",
1159
+ "group": "mmlu_social_sciences",
1160
+ "group_alias": "social_sciences",
1161
+ "dataset_path": "hails/mmlu_no_train",
1162
+ "dataset_name": "high_school_geography",
1163
+ "test_split": "test",
1164
+ "fewshot_split": "dev",
1165
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1166
+ "doc_to_target": "answer",
1167
+ "doc_to_choice": [
1168
+ "A",
1169
+ "B",
1170
+ "C",
1171
+ "D"
1172
+ ],
1173
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
1174
+ "target_delimiter": " ",
1175
+ "fewshot_delimiter": "\n\n",
1176
+ "fewshot_config": {
1177
+ "sampler": "first_n"
1178
+ },
1179
+ "metric_list": [
1180
+ {
1181
+ "metric": "acc",
1182
+ "aggregation": "mean",
1183
+ "higher_is_better": true
1184
+ }
1185
+ ],
1186
+ "output_type": "multiple_choice",
1187
+ "repeats": 1,
1188
+ "should_decontaminate": false,
1189
+ "metadata": {
1190
+ "version": 0.0
1191
+ }
1192
+ },
1193
+ "mmlu_high_school_government_and_politics": {
1194
+ "task": "mmlu_high_school_government_and_politics",
1195
+ "task_alias": "high_school_government_and_politics",
1196
+ "group": "mmlu_social_sciences",
1197
+ "group_alias": "social_sciences",
1198
+ "dataset_path": "hails/mmlu_no_train",
1199
+ "dataset_name": "high_school_government_and_politics",
1200
+ "test_split": "test",
1201
+ "fewshot_split": "dev",
1202
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1203
+ "doc_to_target": "answer",
1204
+ "doc_to_choice": [
1205
+ "A",
1206
+ "B",
1207
+ "C",
1208
+ "D"
1209
+ ],
1210
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
1211
+ "target_delimiter": " ",
1212
+ "fewshot_delimiter": "\n\n",
1213
+ "fewshot_config": {
1214
+ "sampler": "first_n"
1215
+ },
1216
+ "metric_list": [
1217
+ {
1218
+ "metric": "acc",
1219
+ "aggregation": "mean",
1220
+ "higher_is_better": true
1221
+ }
1222
+ ],
1223
+ "output_type": "multiple_choice",
1224
+ "repeats": 1,
1225
+ "should_decontaminate": false,
1226
+ "metadata": {
1227
+ "version": 0.0
1228
+ }
1229
+ },
1230
+ "mmlu_high_school_macroeconomics": {
1231
+ "task": "mmlu_high_school_macroeconomics",
1232
+ "task_alias": "high_school_macroeconomics",
1233
+ "group": "mmlu_social_sciences",
1234
+ "group_alias": "social_sciences",
1235
+ "dataset_path": "hails/mmlu_no_train",
1236
+ "dataset_name": "high_school_macroeconomics",
1237
+ "test_split": "test",
1238
+ "fewshot_split": "dev",
1239
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1240
+ "doc_to_target": "answer",
1241
+ "doc_to_choice": [
1242
+ "A",
1243
+ "B",
1244
+ "C",
1245
+ "D"
1246
+ ],
1247
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
1248
+ "target_delimiter": " ",
1249
+ "fewshot_delimiter": "\n\n",
1250
+ "fewshot_config": {
1251
+ "sampler": "first_n"
1252
+ },
1253
+ "metric_list": [
1254
+ {
1255
+ "metric": "acc",
1256
+ "aggregation": "mean",
1257
+ "higher_is_better": true
1258
+ }
1259
+ ],
1260
+ "output_type": "multiple_choice",
1261
+ "repeats": 1,
1262
+ "should_decontaminate": false,
1263
+ "metadata": {
1264
+ "version": 0.0
1265
+ }
1266
+ },
1267
+ "mmlu_high_school_mathematics": {
1268
+ "task": "mmlu_high_school_mathematics",
1269
+ "task_alias": "high_school_mathematics",
1270
+ "group": "mmlu_stem",
1271
+ "group_alias": "stem",
1272
+ "dataset_path": "hails/mmlu_no_train",
1273
+ "dataset_name": "high_school_mathematics",
1274
+ "test_split": "test",
1275
+ "fewshot_split": "dev",
1276
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1277
+ "doc_to_target": "answer",
1278
+ "doc_to_choice": [
1279
+ "A",
1280
+ "B",
1281
+ "C",
1282
+ "D"
1283
+ ],
1284
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
1285
+ "target_delimiter": " ",
1286
+ "fewshot_delimiter": "\n\n",
1287
+ "fewshot_config": {
1288
+ "sampler": "first_n"
1289
+ },
1290
+ "metric_list": [
1291
+ {
1292
+ "metric": "acc",
1293
+ "aggregation": "mean",
1294
+ "higher_is_better": true
1295
+ }
1296
+ ],
1297
+ "output_type": "multiple_choice",
1298
+ "repeats": 1,
1299
+ "should_decontaminate": false,
1300
+ "metadata": {
1301
+ "version": 0.0
1302
+ }
1303
+ },
1304
+ "mmlu_high_school_microeconomics": {
1305
+ "task": "mmlu_high_school_microeconomics",
1306
+ "task_alias": "high_school_microeconomics",
1307
+ "group": "mmlu_social_sciences",
1308
+ "group_alias": "social_sciences",
1309
+ "dataset_path": "hails/mmlu_no_train",
1310
+ "dataset_name": "high_school_microeconomics",
1311
+ "test_split": "test",
1312
+ "fewshot_split": "dev",
1313
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1314
+ "doc_to_target": "answer",
1315
+ "doc_to_choice": [
1316
+ "A",
1317
+ "B",
1318
+ "C",
1319
+ "D"
1320
+ ],
1321
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
1322
+ "target_delimiter": " ",
1323
+ "fewshot_delimiter": "\n\n",
1324
+ "fewshot_config": {
1325
+ "sampler": "first_n"
1326
+ },
1327
+ "metric_list": [
1328
+ {
1329
+ "metric": "acc",
1330
+ "aggregation": "mean",
1331
+ "higher_is_better": true
1332
+ }
1333
+ ],
1334
+ "output_type": "multiple_choice",
1335
+ "repeats": 1,
1336
+ "should_decontaminate": false,
1337
+ "metadata": {
1338
+ "version": 0.0
1339
+ }
1340
+ },
1341
+ "mmlu_high_school_physics": {
1342
+ "task": "mmlu_high_school_physics",
1343
+ "task_alias": "high_school_physics",
1344
+ "group": "mmlu_stem",
1345
+ "group_alias": "stem",
1346
+ "dataset_path": "hails/mmlu_no_train",
1347
+ "dataset_name": "high_school_physics",
1348
+ "test_split": "test",
1349
+ "fewshot_split": "dev",
1350
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1351
+ "doc_to_target": "answer",
1352
+ "doc_to_choice": [
1353
+ "A",
1354
+ "B",
1355
+ "C",
1356
+ "D"
1357
+ ],
1358
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
1359
+ "target_delimiter": " ",
1360
+ "fewshot_delimiter": "\n\n",
1361
+ "fewshot_config": {
1362
+ "sampler": "first_n"
1363
+ },
1364
+ "metric_list": [
1365
+ {
1366
+ "metric": "acc",
1367
+ "aggregation": "mean",
1368
+ "higher_is_better": true
1369
+ }
1370
+ ],
1371
+ "output_type": "multiple_choice",
1372
+ "repeats": 1,
1373
+ "should_decontaminate": false,
1374
+ "metadata": {
1375
+ "version": 0.0
1376
+ }
1377
+ },
1378
+ "mmlu_high_school_psychology": {
1379
+ "task": "mmlu_high_school_psychology",
1380
+ "task_alias": "high_school_psychology",
1381
+ "group": "mmlu_social_sciences",
1382
+ "group_alias": "social_sciences",
1383
+ "dataset_path": "hails/mmlu_no_train",
1384
+ "dataset_name": "high_school_psychology",
1385
+ "test_split": "test",
1386
+ "fewshot_split": "dev",
1387
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1388
+ "doc_to_target": "answer",
1389
+ "doc_to_choice": [
1390
+ "A",
1391
+ "B",
1392
+ "C",
1393
+ "D"
1394
+ ],
1395
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
1396
+ "target_delimiter": " ",
1397
+ "fewshot_delimiter": "\n\n",
1398
+ "fewshot_config": {
1399
+ "sampler": "first_n"
1400
+ },
1401
+ "metric_list": [
1402
+ {
1403
+ "metric": "acc",
1404
+ "aggregation": "mean",
1405
+ "higher_is_better": true
1406
+ }
1407
+ ],
1408
+ "output_type": "multiple_choice",
1409
+ "repeats": 1,
1410
+ "should_decontaminate": false,
1411
+ "metadata": {
1412
+ "version": 0.0
1413
+ }
1414
+ },
1415
+ "mmlu_high_school_statistics": {
1416
+ "task": "mmlu_high_school_statistics",
1417
+ "task_alias": "high_school_statistics",
1418
+ "group": "mmlu_stem",
1419
+ "group_alias": "stem",
1420
+ "dataset_path": "hails/mmlu_no_train",
1421
+ "dataset_name": "high_school_statistics",
1422
+ "test_split": "test",
1423
+ "fewshot_split": "dev",
1424
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1425
+ "doc_to_target": "answer",
1426
+ "doc_to_choice": [
1427
+ "A",
1428
+ "B",
1429
+ "C",
1430
+ "D"
1431
+ ],
1432
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
1433
+ "target_delimiter": " ",
1434
+ "fewshot_delimiter": "\n\n",
1435
+ "fewshot_config": {
1436
+ "sampler": "first_n"
1437
+ },
1438
+ "metric_list": [
1439
+ {
1440
+ "metric": "acc",
1441
+ "aggregation": "mean",
1442
+ "higher_is_better": true
1443
+ }
1444
+ ],
1445
+ "output_type": "multiple_choice",
1446
+ "repeats": 1,
1447
+ "should_decontaminate": false,
1448
+ "metadata": {
1449
+ "version": 0.0
1450
+ }
1451
+ },
1452
+ "mmlu_high_school_us_history": {
1453
+ "task": "mmlu_high_school_us_history",
1454
+ "task_alias": "high_school_us_history",
1455
+ "group": "mmlu_humanities",
1456
+ "group_alias": "humanities",
1457
+ "dataset_path": "hails/mmlu_no_train",
1458
+ "dataset_name": "high_school_us_history",
1459
+ "test_split": "test",
1460
+ "fewshot_split": "dev",
1461
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1462
+ "doc_to_target": "answer",
1463
+ "doc_to_choice": [
1464
+ "A",
1465
+ "B",
1466
+ "C",
1467
+ "D"
1468
+ ],
1469
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
1470
+ "target_delimiter": " ",
1471
+ "fewshot_delimiter": "\n\n",
1472
+ "fewshot_config": {
1473
+ "sampler": "first_n"
1474
+ },
1475
+ "metric_list": [
1476
+ {
1477
+ "metric": "acc",
1478
+ "aggregation": "mean",
1479
+ "higher_is_better": true
1480
+ }
1481
+ ],
1482
+ "output_type": "multiple_choice",
1483
+ "repeats": 1,
1484
+ "should_decontaminate": false,
1485
+ "metadata": {
1486
+ "version": 0.0
1487
+ }
1488
+ },
1489
+ "mmlu_high_school_world_history": {
1490
+ "task": "mmlu_high_school_world_history",
1491
+ "task_alias": "high_school_world_history",
1492
+ "group": "mmlu_humanities",
1493
+ "group_alias": "humanities",
1494
+ "dataset_path": "hails/mmlu_no_train",
1495
+ "dataset_name": "high_school_world_history",
1496
+ "test_split": "test",
1497
+ "fewshot_split": "dev",
1498
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1499
+ "doc_to_target": "answer",
1500
+ "doc_to_choice": [
1501
+ "A",
1502
+ "B",
1503
+ "C",
1504
+ "D"
1505
+ ],
1506
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
1507
+ "target_delimiter": " ",
1508
+ "fewshot_delimiter": "\n\n",
1509
+ "fewshot_config": {
1510
+ "sampler": "first_n"
1511
+ },
1512
+ "metric_list": [
1513
+ {
1514
+ "metric": "acc",
1515
+ "aggregation": "mean",
1516
+ "higher_is_better": true
1517
+ }
1518
+ ],
1519
+ "output_type": "multiple_choice",
1520
+ "repeats": 1,
1521
+ "should_decontaminate": false,
1522
+ "metadata": {
1523
+ "version": 0.0
1524
+ }
1525
+ },
1526
+ "mmlu_human_aging": {
1527
+ "task": "mmlu_human_aging",
1528
+ "task_alias": "human_aging",
1529
+ "group": "mmlu_other",
1530
+ "group_alias": "other",
1531
+ "dataset_path": "hails/mmlu_no_train",
1532
+ "dataset_name": "human_aging",
1533
+ "test_split": "test",
1534
+ "fewshot_split": "dev",
1535
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1536
+ "doc_to_target": "answer",
1537
+ "doc_to_choice": [
1538
+ "A",
1539
+ "B",
1540
+ "C",
1541
+ "D"
1542
+ ],
1543
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
1544
+ "target_delimiter": " ",
1545
+ "fewshot_delimiter": "\n\n",
1546
+ "fewshot_config": {
1547
+ "sampler": "first_n"
1548
+ },
1549
+ "metric_list": [
1550
+ {
1551
+ "metric": "acc",
1552
+ "aggregation": "mean",
1553
+ "higher_is_better": true
1554
+ }
1555
+ ],
1556
+ "output_type": "multiple_choice",
1557
+ "repeats": 1,
1558
+ "should_decontaminate": false,
1559
+ "metadata": {
1560
+ "version": 0.0
1561
+ }
1562
+ },
1563
+ "mmlu_human_sexuality": {
1564
+ "task": "mmlu_human_sexuality",
1565
+ "task_alias": "human_sexuality",
1566
+ "group": "mmlu_social_sciences",
1567
+ "group_alias": "social_sciences",
1568
+ "dataset_path": "hails/mmlu_no_train",
1569
+ "dataset_name": "human_sexuality",
1570
+ "test_split": "test",
1571
+ "fewshot_split": "dev",
1572
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1573
+ "doc_to_target": "answer",
1574
+ "doc_to_choice": [
1575
+ "A",
1576
+ "B",
1577
+ "C",
1578
+ "D"
1579
+ ],
1580
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
1581
+ "target_delimiter": " ",
1582
+ "fewshot_delimiter": "\n\n",
1583
+ "fewshot_config": {
1584
+ "sampler": "first_n"
1585
+ },
1586
+ "metric_list": [
1587
+ {
1588
+ "metric": "acc",
1589
+ "aggregation": "mean",
1590
+ "higher_is_better": true
1591
+ }
1592
+ ],
1593
+ "output_type": "multiple_choice",
1594
+ "repeats": 1,
1595
+ "should_decontaminate": false,
1596
+ "metadata": {
1597
+ "version": 0.0
1598
+ }
1599
+ },
1600
+ "mmlu_international_law": {
1601
+ "task": "mmlu_international_law",
1602
+ "task_alias": "international_law",
1603
+ "group": "mmlu_humanities",
1604
+ "group_alias": "humanities",
1605
+ "dataset_path": "hails/mmlu_no_train",
1606
+ "dataset_name": "international_law",
1607
+ "test_split": "test",
1608
+ "fewshot_split": "dev",
1609
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1610
+ "doc_to_target": "answer",
1611
+ "doc_to_choice": [
1612
+ "A",
1613
+ "B",
1614
+ "C",
1615
+ "D"
1616
+ ],
1617
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
1618
+ "target_delimiter": " ",
1619
+ "fewshot_delimiter": "\n\n",
1620
+ "fewshot_config": {
1621
+ "sampler": "first_n"
1622
+ },
1623
+ "metric_list": [
1624
+ {
1625
+ "metric": "acc",
1626
+ "aggregation": "mean",
1627
+ "higher_is_better": true
1628
+ }
1629
+ ],
1630
+ "output_type": "multiple_choice",
1631
+ "repeats": 1,
1632
+ "should_decontaminate": false,
1633
+ "metadata": {
1634
+ "version": 0.0
1635
+ }
1636
+ },
1637
+ "mmlu_jurisprudence": {
1638
+ "task": "mmlu_jurisprudence",
1639
+ "task_alias": "jurisprudence",
1640
+ "group": "mmlu_humanities",
1641
+ "group_alias": "humanities",
1642
+ "dataset_path": "hails/mmlu_no_train",
1643
+ "dataset_name": "jurisprudence",
1644
+ "test_split": "test",
1645
+ "fewshot_split": "dev",
1646
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1647
+ "doc_to_target": "answer",
1648
+ "doc_to_choice": [
1649
+ "A",
1650
+ "B",
1651
+ "C",
1652
+ "D"
1653
+ ],
1654
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
1655
+ "target_delimiter": " ",
1656
+ "fewshot_delimiter": "\n\n",
1657
+ "fewshot_config": {
1658
+ "sampler": "first_n"
1659
+ },
1660
+ "metric_list": [
1661
+ {
1662
+ "metric": "acc",
1663
+ "aggregation": "mean",
1664
+ "higher_is_better": true
1665
+ }
1666
+ ],
1667
+ "output_type": "multiple_choice",
1668
+ "repeats": 1,
1669
+ "should_decontaminate": false,
1670
+ "metadata": {
1671
+ "version": 0.0
1672
+ }
1673
+ },
1674
+ "mmlu_logical_fallacies": {
1675
+ "task": "mmlu_logical_fallacies",
1676
+ "task_alias": "logical_fallacies",
1677
+ "group": "mmlu_humanities",
1678
+ "group_alias": "humanities",
1679
+ "dataset_path": "hails/mmlu_no_train",
1680
+ "dataset_name": "logical_fallacies",
1681
+ "test_split": "test",
1682
+ "fewshot_split": "dev",
1683
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1684
+ "doc_to_target": "answer",
1685
+ "doc_to_choice": [
1686
+ "A",
1687
+ "B",
1688
+ "C",
1689
+ "D"
1690
+ ],
1691
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
1692
+ "target_delimiter": " ",
1693
+ "fewshot_delimiter": "\n\n",
1694
+ "fewshot_config": {
1695
+ "sampler": "first_n"
1696
+ },
1697
+ "metric_list": [
1698
+ {
1699
+ "metric": "acc",
1700
+ "aggregation": "mean",
1701
+ "higher_is_better": true
1702
+ }
1703
+ ],
1704
+ "output_type": "multiple_choice",
1705
+ "repeats": 1,
1706
+ "should_decontaminate": false,
1707
+ "metadata": {
1708
+ "version": 0.0
1709
+ }
1710
+ },
1711
+ "mmlu_machine_learning": {
1712
+ "task": "mmlu_machine_learning",
1713
+ "task_alias": "machine_learning",
1714
+ "group": "mmlu_stem",
1715
+ "group_alias": "stem",
1716
+ "dataset_path": "hails/mmlu_no_train",
1717
+ "dataset_name": "machine_learning",
1718
+ "test_split": "test",
1719
+ "fewshot_split": "dev",
1720
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1721
+ "doc_to_target": "answer",
1722
+ "doc_to_choice": [
1723
+ "A",
1724
+ "B",
1725
+ "C",
1726
+ "D"
1727
+ ],
1728
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
1729
+ "target_delimiter": " ",
1730
+ "fewshot_delimiter": "\n\n",
1731
+ "fewshot_config": {
1732
+ "sampler": "first_n"
1733
+ },
1734
+ "metric_list": [
1735
+ {
1736
+ "metric": "acc",
1737
+ "aggregation": "mean",
1738
+ "higher_is_better": true
1739
+ }
1740
+ ],
1741
+ "output_type": "multiple_choice",
1742
+ "repeats": 1,
1743
+ "should_decontaminate": false,
1744
+ "metadata": {
1745
+ "version": 0.0
1746
+ }
1747
+ },
1748
+ "mmlu_management": {
1749
+ "task": "mmlu_management",
1750
+ "task_alias": "management",
1751
+ "group": "mmlu_other",
1752
+ "group_alias": "other",
1753
+ "dataset_path": "hails/mmlu_no_train",
1754
+ "dataset_name": "management",
1755
+ "test_split": "test",
1756
+ "fewshot_split": "dev",
1757
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1758
+ "doc_to_target": "answer",
1759
+ "doc_to_choice": [
1760
+ "A",
1761
+ "B",
1762
+ "C",
1763
+ "D"
1764
+ ],
1765
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
1766
+ "target_delimiter": " ",
1767
+ "fewshot_delimiter": "\n\n",
1768
+ "fewshot_config": {
1769
+ "sampler": "first_n"
1770
+ },
1771
+ "metric_list": [
1772
+ {
1773
+ "metric": "acc",
1774
+ "aggregation": "mean",
1775
+ "higher_is_better": true
1776
+ }
1777
+ ],
1778
+ "output_type": "multiple_choice",
1779
+ "repeats": 1,
1780
+ "should_decontaminate": false,
1781
+ "metadata": {
1782
+ "version": 0.0
1783
+ }
1784
+ },
1785
+ "mmlu_marketing": {
1786
+ "task": "mmlu_marketing",
1787
+ "task_alias": "marketing",
1788
+ "group": "mmlu_other",
1789
+ "group_alias": "other",
1790
+ "dataset_path": "hails/mmlu_no_train",
1791
+ "dataset_name": "marketing",
1792
+ "test_split": "test",
1793
+ "fewshot_split": "dev",
1794
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1795
+ "doc_to_target": "answer",
1796
+ "doc_to_choice": [
1797
+ "A",
1798
+ "B",
1799
+ "C",
1800
+ "D"
1801
+ ],
1802
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
1803
+ "target_delimiter": " ",
1804
+ "fewshot_delimiter": "\n\n",
1805
+ "fewshot_config": {
1806
+ "sampler": "first_n"
1807
+ },
1808
+ "metric_list": [
1809
+ {
1810
+ "metric": "acc",
1811
+ "aggregation": "mean",
1812
+ "higher_is_better": true
1813
+ }
1814
+ ],
1815
+ "output_type": "multiple_choice",
1816
+ "repeats": 1,
1817
+ "should_decontaminate": false,
1818
+ "metadata": {
1819
+ "version": 0.0
1820
+ }
1821
+ },
1822
+ "mmlu_medical_genetics": {
1823
+ "task": "mmlu_medical_genetics",
1824
+ "task_alias": "medical_genetics",
1825
+ "group": "mmlu_other",
1826
+ "group_alias": "other",
1827
+ "dataset_path": "hails/mmlu_no_train",
1828
+ "dataset_name": "medical_genetics",
1829
+ "test_split": "test",
1830
+ "fewshot_split": "dev",
1831
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1832
+ "doc_to_target": "answer",
1833
+ "doc_to_choice": [
1834
+ "A",
1835
+ "B",
1836
+ "C",
1837
+ "D"
1838
+ ],
1839
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
1840
+ "target_delimiter": " ",
1841
+ "fewshot_delimiter": "\n\n",
1842
+ "fewshot_config": {
1843
+ "sampler": "first_n"
1844
+ },
1845
+ "metric_list": [
1846
+ {
1847
+ "metric": "acc",
1848
+ "aggregation": "mean",
1849
+ "higher_is_better": true
1850
+ }
1851
+ ],
1852
+ "output_type": "multiple_choice",
1853
+ "repeats": 1,
1854
+ "should_decontaminate": false,
1855
+ "metadata": {
1856
+ "version": 0.0
1857
+ }
1858
+ },
1859
+ "mmlu_miscellaneous": {
1860
+ "task": "mmlu_miscellaneous",
1861
+ "task_alias": "miscellaneous",
1862
+ "group": "mmlu_other",
1863
+ "group_alias": "other",
1864
+ "dataset_path": "hails/mmlu_no_train",
1865
+ "dataset_name": "miscellaneous",
1866
+ "test_split": "test",
1867
+ "fewshot_split": "dev",
1868
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1869
+ "doc_to_target": "answer",
1870
+ "doc_to_choice": [
1871
+ "A",
1872
+ "B",
1873
+ "C",
1874
+ "D"
1875
+ ],
1876
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
1877
+ "target_delimiter": " ",
1878
+ "fewshot_delimiter": "\n\n",
1879
+ "fewshot_config": {
1880
+ "sampler": "first_n"
1881
+ },
1882
+ "metric_list": [
1883
+ {
1884
+ "metric": "acc",
1885
+ "aggregation": "mean",
1886
+ "higher_is_better": true
1887
+ }
1888
+ ],
1889
+ "output_type": "multiple_choice",
1890
+ "repeats": 1,
1891
+ "should_decontaminate": false,
1892
+ "metadata": {
1893
+ "version": 0.0
1894
+ }
1895
+ },
1896
+ "mmlu_moral_disputes": {
1897
+ "task": "mmlu_moral_disputes",
1898
+ "task_alias": "moral_disputes",
1899
+ "group": "mmlu_humanities",
1900
+ "group_alias": "humanities",
1901
+ "dataset_path": "hails/mmlu_no_train",
1902
+ "dataset_name": "moral_disputes",
1903
+ "test_split": "test",
1904
+ "fewshot_split": "dev",
1905
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1906
+ "doc_to_target": "answer",
1907
+ "doc_to_choice": [
1908
+ "A",
1909
+ "B",
1910
+ "C",
1911
+ "D"
1912
+ ],
1913
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
1914
+ "target_delimiter": " ",
1915
+ "fewshot_delimiter": "\n\n",
1916
+ "fewshot_config": {
1917
+ "sampler": "first_n"
1918
+ },
1919
+ "metric_list": [
1920
+ {
1921
+ "metric": "acc",
1922
+ "aggregation": "mean",
1923
+ "higher_is_better": true
1924
+ }
1925
+ ],
1926
+ "output_type": "multiple_choice",
1927
+ "repeats": 1,
1928
+ "should_decontaminate": false,
1929
+ "metadata": {
1930
+ "version": 0.0
1931
+ }
1932
+ },
1933
+ "mmlu_moral_scenarios": {
1934
+ "task": "mmlu_moral_scenarios",
1935
+ "task_alias": "moral_scenarios",
1936
+ "group": "mmlu_humanities",
1937
+ "group_alias": "humanities",
1938
+ "dataset_path": "hails/mmlu_no_train",
1939
+ "dataset_name": "moral_scenarios",
1940
+ "test_split": "test",
1941
+ "fewshot_split": "dev",
1942
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1943
+ "doc_to_target": "answer",
1944
+ "doc_to_choice": [
1945
+ "A",
1946
+ "B",
1947
+ "C",
1948
+ "D"
1949
+ ],
1950
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
1951
+ "target_delimiter": " ",
1952
+ "fewshot_delimiter": "\n\n",
1953
+ "fewshot_config": {
1954
+ "sampler": "first_n"
1955
+ },
1956
+ "metric_list": [
1957
+ {
1958
+ "metric": "acc",
1959
+ "aggregation": "mean",
1960
+ "higher_is_better": true
1961
+ }
1962
+ ],
1963
+ "output_type": "multiple_choice",
1964
+ "repeats": 1,
1965
+ "should_decontaminate": false,
1966
+ "metadata": {
1967
+ "version": 0.0
1968
+ }
1969
+ },
1970
+ "mmlu_nutrition": {
1971
+ "task": "mmlu_nutrition",
1972
+ "task_alias": "nutrition",
1973
+ "group": "mmlu_other",
1974
+ "group_alias": "other",
1975
+ "dataset_path": "hails/mmlu_no_train",
1976
+ "dataset_name": "nutrition",
1977
+ "test_split": "test",
1978
+ "fewshot_split": "dev",
1979
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1980
+ "doc_to_target": "answer",
1981
+ "doc_to_choice": [
1982
+ "A",
1983
+ "B",
1984
+ "C",
1985
+ "D"
1986
+ ],
1987
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
1988
+ "target_delimiter": " ",
1989
+ "fewshot_delimiter": "\n\n",
1990
+ "fewshot_config": {
1991
+ "sampler": "first_n"
1992
+ },
1993
+ "metric_list": [
1994
+ {
1995
+ "metric": "acc",
1996
+ "aggregation": "mean",
1997
+ "higher_is_better": true
1998
+ }
1999
+ ],
2000
+ "output_type": "multiple_choice",
2001
+ "repeats": 1,
2002
+ "should_decontaminate": false,
2003
+ "metadata": {
2004
+ "version": 0.0
2005
+ }
2006
+ },
2007
+ "mmlu_philosophy": {
2008
+ "task": "mmlu_philosophy",
2009
+ "task_alias": "philosophy",
2010
+ "group": "mmlu_humanities",
2011
+ "group_alias": "humanities",
2012
+ "dataset_path": "hails/mmlu_no_train",
2013
+ "dataset_name": "philosophy",
2014
+ "test_split": "test",
2015
+ "fewshot_split": "dev",
2016
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2017
+ "doc_to_target": "answer",
2018
+ "doc_to_choice": [
2019
+ "A",
2020
+ "B",
2021
+ "C",
2022
+ "D"
2023
+ ],
2024
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
2025
+ "target_delimiter": " ",
2026
+ "fewshot_delimiter": "\n\n",
2027
+ "fewshot_config": {
2028
+ "sampler": "first_n"
2029
+ },
2030
+ "metric_list": [
2031
+ {
2032
+ "metric": "acc",
2033
+ "aggregation": "mean",
2034
+ "higher_is_better": true
2035
+ }
2036
+ ],
2037
+ "output_type": "multiple_choice",
2038
+ "repeats": 1,
2039
+ "should_decontaminate": false,
2040
+ "metadata": {
2041
+ "version": 0.0
2042
+ }
2043
+ },
2044
+ "mmlu_prehistory": {
2045
+ "task": "mmlu_prehistory",
2046
+ "task_alias": "prehistory",
2047
+ "group": "mmlu_humanities",
2048
+ "group_alias": "humanities",
2049
+ "dataset_path": "hails/mmlu_no_train",
2050
+ "dataset_name": "prehistory",
2051
+ "test_split": "test",
2052
+ "fewshot_split": "dev",
2053
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2054
+ "doc_to_target": "answer",
2055
+ "doc_to_choice": [
2056
+ "A",
2057
+ "B",
2058
+ "C",
2059
+ "D"
2060
+ ],
2061
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
2062
+ "target_delimiter": " ",
2063
+ "fewshot_delimiter": "\n\n",
2064
+ "fewshot_config": {
2065
+ "sampler": "first_n"
2066
+ },
2067
+ "metric_list": [
2068
+ {
2069
+ "metric": "acc",
2070
+ "aggregation": "mean",
2071
+ "higher_is_better": true
2072
+ }
2073
+ ],
2074
+ "output_type": "multiple_choice",
2075
+ "repeats": 1,
2076
+ "should_decontaminate": false,
2077
+ "metadata": {
2078
+ "version": 0.0
2079
+ }
2080
+ },
2081
+ "mmlu_professional_accounting": {
2082
+ "task": "mmlu_professional_accounting",
2083
+ "task_alias": "professional_accounting",
2084
+ "group": "mmlu_other",
2085
+ "group_alias": "other",
2086
+ "dataset_path": "hails/mmlu_no_train",
2087
+ "dataset_name": "professional_accounting",
2088
+ "test_split": "test",
2089
+ "fewshot_split": "dev",
2090
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2091
+ "doc_to_target": "answer",
2092
+ "doc_to_choice": [
2093
+ "A",
2094
+ "B",
2095
+ "C",
2096
+ "D"
2097
+ ],
2098
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
2099
+ "target_delimiter": " ",
2100
+ "fewshot_delimiter": "\n\n",
2101
+ "fewshot_config": {
2102
+ "sampler": "first_n"
2103
+ },
2104
+ "metric_list": [
2105
+ {
2106
+ "metric": "acc",
2107
+ "aggregation": "mean",
2108
+ "higher_is_better": true
2109
+ }
2110
+ ],
2111
+ "output_type": "multiple_choice",
2112
+ "repeats": 1,
2113
+ "should_decontaminate": false,
2114
+ "metadata": {
2115
+ "version": 0.0
2116
+ }
2117
+ },
2118
+ "mmlu_professional_law": {
2119
+ "task": "mmlu_professional_law",
2120
+ "task_alias": "professional_law",
2121
+ "group": "mmlu_humanities",
2122
+ "group_alias": "humanities",
2123
+ "dataset_path": "hails/mmlu_no_train",
2124
+ "dataset_name": "professional_law",
2125
+ "test_split": "test",
2126
+ "fewshot_split": "dev",
2127
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2128
+ "doc_to_target": "answer",
2129
+ "doc_to_choice": [
2130
+ "A",
2131
+ "B",
2132
+ "C",
2133
+ "D"
2134
+ ],
2135
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
2136
+ "target_delimiter": " ",
2137
+ "fewshot_delimiter": "\n\n",
2138
+ "fewshot_config": {
2139
+ "sampler": "first_n"
2140
+ },
2141
+ "metric_list": [
2142
+ {
2143
+ "metric": "acc",
2144
+ "aggregation": "mean",
2145
+ "higher_is_better": true
2146
+ }
2147
+ ],
2148
+ "output_type": "multiple_choice",
2149
+ "repeats": 1,
2150
+ "should_decontaminate": false,
2151
+ "metadata": {
2152
+ "version": 0.0
2153
+ }
2154
+ },
2155
+ "mmlu_professional_medicine": {
2156
+ "task": "mmlu_professional_medicine",
2157
+ "task_alias": "professional_medicine",
2158
+ "group": "mmlu_other",
2159
+ "group_alias": "other",
2160
+ "dataset_path": "hails/mmlu_no_train",
2161
+ "dataset_name": "professional_medicine",
2162
+ "test_split": "test",
2163
+ "fewshot_split": "dev",
2164
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2165
+ "doc_to_target": "answer",
2166
+ "doc_to_choice": [
2167
+ "A",
2168
+ "B",
2169
+ "C",
2170
+ "D"
2171
+ ],
2172
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
2173
+ "target_delimiter": " ",
2174
+ "fewshot_delimiter": "\n\n",
2175
+ "fewshot_config": {
2176
+ "sampler": "first_n"
2177
+ },
2178
+ "metric_list": [
2179
+ {
2180
+ "metric": "acc",
2181
+ "aggregation": "mean",
2182
+ "higher_is_better": true
2183
+ }
2184
+ ],
2185
+ "output_type": "multiple_choice",
2186
+ "repeats": 1,
2187
+ "should_decontaminate": false,
2188
+ "metadata": {
2189
+ "version": 0.0
2190
+ }
2191
+ },
2192
+ "mmlu_professional_psychology": {
2193
+ "task": "mmlu_professional_psychology",
2194
+ "task_alias": "professional_psychology",
2195
+ "group": "mmlu_social_sciences",
2196
+ "group_alias": "social_sciences",
2197
+ "dataset_path": "hails/mmlu_no_train",
2198
+ "dataset_name": "professional_psychology",
2199
+ "test_split": "test",
2200
+ "fewshot_split": "dev",
2201
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2202
+ "doc_to_target": "answer",
2203
+ "doc_to_choice": [
2204
+ "A",
2205
+ "B",
2206
+ "C",
2207
+ "D"
2208
+ ],
2209
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
2210
+ "target_delimiter": " ",
2211
+ "fewshot_delimiter": "\n\n",
2212
+ "fewshot_config": {
2213
+ "sampler": "first_n"
2214
+ },
2215
+ "metric_list": [
2216
+ {
2217
+ "metric": "acc",
2218
+ "aggregation": "mean",
2219
+ "higher_is_better": true
2220
+ }
2221
+ ],
2222
+ "output_type": "multiple_choice",
2223
+ "repeats": 1,
2224
+ "should_decontaminate": false,
2225
+ "metadata": {
2226
+ "version": 0.0
2227
+ }
2228
+ },
2229
+ "mmlu_public_relations": {
2230
+ "task": "mmlu_public_relations",
2231
+ "task_alias": "public_relations",
2232
+ "group": "mmlu_social_sciences",
2233
+ "group_alias": "social_sciences",
2234
+ "dataset_path": "hails/mmlu_no_train",
2235
+ "dataset_name": "public_relations",
2236
+ "test_split": "test",
2237
+ "fewshot_split": "dev",
2238
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2239
+ "doc_to_target": "answer",
2240
+ "doc_to_choice": [
2241
+ "A",
2242
+ "B",
2243
+ "C",
2244
+ "D"
2245
+ ],
2246
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
2247
+ "target_delimiter": " ",
2248
+ "fewshot_delimiter": "\n\n",
2249
+ "fewshot_config": {
2250
+ "sampler": "first_n"
2251
+ },
2252
+ "metric_list": [
2253
+ {
2254
+ "metric": "acc",
2255
+ "aggregation": "mean",
2256
+ "higher_is_better": true
2257
+ }
2258
+ ],
2259
+ "output_type": "multiple_choice",
2260
+ "repeats": 1,
2261
+ "should_decontaminate": false,
2262
+ "metadata": {
2263
+ "version": 0.0
2264
+ }
2265
+ },
2266
+ "mmlu_security_studies": {
2267
+ "task": "mmlu_security_studies",
2268
+ "task_alias": "security_studies",
2269
+ "group": "mmlu_social_sciences",
2270
+ "group_alias": "social_sciences",
2271
+ "dataset_path": "hails/mmlu_no_train",
2272
+ "dataset_name": "security_studies",
2273
+ "test_split": "test",
2274
+ "fewshot_split": "dev",
2275
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2276
+ "doc_to_target": "answer",
2277
+ "doc_to_choice": [
2278
+ "A",
2279
+ "B",
2280
+ "C",
2281
+ "D"
2282
+ ],
2283
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
2284
+ "target_delimiter": " ",
2285
+ "fewshot_delimiter": "\n\n",
2286
+ "fewshot_config": {
2287
+ "sampler": "first_n"
2288
+ },
2289
+ "metric_list": [
2290
+ {
2291
+ "metric": "acc",
2292
+ "aggregation": "mean",
2293
+ "higher_is_better": true
2294
+ }
2295
+ ],
2296
+ "output_type": "multiple_choice",
2297
+ "repeats": 1,
2298
+ "should_decontaminate": false,
2299
+ "metadata": {
2300
+ "version": 0.0
2301
+ }
2302
+ },
2303
+ "mmlu_sociology": {
2304
+ "task": "mmlu_sociology",
2305
+ "task_alias": "sociology",
2306
+ "group": "mmlu_social_sciences",
2307
+ "group_alias": "social_sciences",
2308
+ "dataset_path": "hails/mmlu_no_train",
2309
+ "dataset_name": "sociology",
2310
+ "test_split": "test",
2311
+ "fewshot_split": "dev",
2312
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2313
+ "doc_to_target": "answer",
2314
+ "doc_to_choice": [
2315
+ "A",
2316
+ "B",
2317
+ "C",
2318
+ "D"
2319
+ ],
2320
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
2321
+ "target_delimiter": " ",
2322
+ "fewshot_delimiter": "\n\n",
2323
+ "fewshot_config": {
2324
+ "sampler": "first_n"
2325
+ },
2326
+ "metric_list": [
2327
+ {
2328
+ "metric": "acc",
2329
+ "aggregation": "mean",
2330
+ "higher_is_better": true
2331
+ }
2332
+ ],
2333
+ "output_type": "multiple_choice",
2334
+ "repeats": 1,
2335
+ "should_decontaminate": false,
2336
+ "metadata": {
2337
+ "version": 0.0
2338
+ }
2339
+ },
2340
+ "mmlu_us_foreign_policy": {
2341
+ "task": "mmlu_us_foreign_policy",
2342
+ "task_alias": "us_foreign_policy",
2343
+ "group": "mmlu_social_sciences",
2344
+ "group_alias": "social_sciences",
2345
+ "dataset_path": "hails/mmlu_no_train",
2346
+ "dataset_name": "us_foreign_policy",
2347
+ "test_split": "test",
2348
+ "fewshot_split": "dev",
2349
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2350
+ "doc_to_target": "answer",
2351
+ "doc_to_choice": [
2352
+ "A",
2353
+ "B",
2354
+ "C",
2355
+ "D"
2356
+ ],
2357
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
2358
+ "target_delimiter": " ",
2359
+ "fewshot_delimiter": "\n\n",
2360
+ "fewshot_config": {
2361
+ "sampler": "first_n"
2362
+ },
2363
+ "metric_list": [
2364
+ {
2365
+ "metric": "acc",
2366
+ "aggregation": "mean",
2367
+ "higher_is_better": true
2368
+ }
2369
+ ],
2370
+ "output_type": "multiple_choice",
2371
+ "repeats": 1,
2372
+ "should_decontaminate": false,
2373
+ "metadata": {
2374
+ "version": 0.0
2375
+ }
2376
+ },
2377
+ "mmlu_virology": {
2378
+ "task": "mmlu_virology",
2379
+ "task_alias": "virology",
2380
+ "group": "mmlu_other",
2381
+ "group_alias": "other",
2382
+ "dataset_path": "hails/mmlu_no_train",
2383
+ "dataset_name": "virology",
2384
+ "test_split": "test",
2385
+ "fewshot_split": "dev",
2386
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2387
+ "doc_to_target": "answer",
2388
+ "doc_to_choice": [
2389
+ "A",
2390
+ "B",
2391
+ "C",
2392
+ "D"
2393
+ ],
2394
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
2395
+ "target_delimiter": " ",
2396
+ "fewshot_delimiter": "\n\n",
2397
+ "fewshot_config": {
2398
+ "sampler": "first_n"
2399
+ },
2400
+ "metric_list": [
2401
+ {
2402
+ "metric": "acc",
2403
+ "aggregation": "mean",
2404
+ "higher_is_better": true
2405
+ }
2406
+ ],
2407
+ "output_type": "multiple_choice",
2408
+ "repeats": 1,
2409
+ "should_decontaminate": false,
2410
+ "metadata": {
2411
+ "version": 0.0
2412
+ }
2413
+ },
2414
+ "mmlu_world_religions": {
2415
+ "task": "mmlu_world_religions",
2416
+ "task_alias": "world_religions",
2417
+ "group": "mmlu_humanities",
2418
+ "group_alias": "humanities",
2419
+ "dataset_path": "hails/mmlu_no_train",
2420
+ "dataset_name": "world_religions",
2421
+ "test_split": "test",
2422
+ "fewshot_split": "dev",
2423
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2424
+ "doc_to_target": "answer",
2425
+ "doc_to_choice": [
2426
+ "A",
2427
+ "B",
2428
+ "C",
2429
+ "D"
2430
+ ],
2431
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
2432
+ "target_delimiter": " ",
2433
+ "fewshot_delimiter": "\n\n",
2434
+ "fewshot_config": {
2435
+ "sampler": "first_n"
2436
+ },
2437
+ "metric_list": [
2438
+ {
2439
+ "metric": "acc",
2440
+ "aggregation": "mean",
2441
+ "higher_is_better": true
2442
+ }
2443
+ ],
2444
+ "output_type": "multiple_choice",
2445
+ "repeats": 1,
2446
+ "should_decontaminate": false,
2447
+ "metadata": {
2448
+ "version": 0.0
2449
+ }
2450
+ }
2451
+ },
2452
+ "versions": {
2453
+ "mmlu": "N/A",
2454
+ "mmlu_abstract_algebra": "Yaml",
2455
+ "mmlu_anatomy": "Yaml",
2456
+ "mmlu_astronomy": "Yaml",
2457
+ "mmlu_business_ethics": "Yaml",
2458
+ "mmlu_clinical_knowledge": "Yaml",
2459
+ "mmlu_college_biology": "Yaml",
2460
+ "mmlu_college_chemistry": "Yaml",
2461
+ "mmlu_college_computer_science": "Yaml",
2462
+ "mmlu_college_mathematics": "Yaml",
2463
+ "mmlu_college_medicine": "Yaml",
2464
+ "mmlu_college_physics": "Yaml",
2465
+ "mmlu_computer_security": "Yaml",
2466
+ "mmlu_conceptual_physics": "Yaml",
2467
+ "mmlu_econometrics": "Yaml",
2468
+ "mmlu_electrical_engineering": "Yaml",
2469
+ "mmlu_elementary_mathematics": "Yaml",
2470
+ "mmlu_formal_logic": "Yaml",
2471
+ "mmlu_global_facts": "Yaml",
2472
+ "mmlu_high_school_biology": "Yaml",
2473
+ "mmlu_high_school_chemistry": "Yaml",
2474
+ "mmlu_high_school_computer_science": "Yaml",
2475
+ "mmlu_high_school_european_history": "Yaml",
2476
+ "mmlu_high_school_geography": "Yaml",
2477
+ "mmlu_high_school_government_and_politics": "Yaml",
2478
+ "mmlu_high_school_macroeconomics": "Yaml",
2479
+ "mmlu_high_school_mathematics": "Yaml",
2480
+ "mmlu_high_school_microeconomics": "Yaml",
2481
+ "mmlu_high_school_physics": "Yaml",
2482
+ "mmlu_high_school_psychology": "Yaml",
2483
+ "mmlu_high_school_statistics": "Yaml",
2484
+ "mmlu_high_school_us_history": "Yaml",
2485
+ "mmlu_high_school_world_history": "Yaml",
2486
+ "mmlu_human_aging": "Yaml",
2487
+ "mmlu_human_sexuality": "Yaml",
2488
+ "mmlu_humanities": "N/A",
2489
+ "mmlu_international_law": "Yaml",
2490
+ "mmlu_jurisprudence": "Yaml",
2491
+ "mmlu_logical_fallacies": "Yaml",
2492
+ "mmlu_machine_learning": "Yaml",
2493
+ "mmlu_management": "Yaml",
2494
+ "mmlu_marketing": "Yaml",
2495
+ "mmlu_medical_genetics": "Yaml",
2496
+ "mmlu_miscellaneous": "Yaml",
2497
+ "mmlu_moral_disputes": "Yaml",
2498
+ "mmlu_moral_scenarios": "Yaml",
2499
+ "mmlu_nutrition": "Yaml",
2500
+ "mmlu_other": "N/A",
2501
+ "mmlu_philosophy": "Yaml",
2502
+ "mmlu_prehistory": "Yaml",
2503
+ "mmlu_professional_accounting": "Yaml",
2504
+ "mmlu_professional_law": "Yaml",
2505
+ "mmlu_professional_medicine": "Yaml",
2506
+ "mmlu_professional_psychology": "Yaml",
2507
+ "mmlu_public_relations": "Yaml",
2508
+ "mmlu_security_studies": "Yaml",
2509
+ "mmlu_social_sciences": "N/A",
2510
+ "mmlu_sociology": "Yaml",
2511
+ "mmlu_stem": "N/A",
2512
+ "mmlu_us_foreign_policy": "Yaml",
2513
+ "mmlu_virology": "Yaml",
2514
+ "mmlu_world_religions": "Yaml"
2515
+ },
2516
+ "n-shot": {
2517
+ "mmlu": 0,
2518
+ "mmlu_abstract_algebra": 0,
2519
+ "mmlu_anatomy": 0,
2520
+ "mmlu_astronomy": 0,
2521
+ "mmlu_business_ethics": 0,
2522
+ "mmlu_clinical_knowledge": 0,
2523
+ "mmlu_college_biology": 0,
2524
+ "mmlu_college_chemistry": 0,
2525
+ "mmlu_college_computer_science": 0,
2526
+ "mmlu_college_mathematics": 0,
2527
+ "mmlu_college_medicine": 0,
2528
+ "mmlu_college_physics": 0,
2529
+ "mmlu_computer_security": 0,
2530
+ "mmlu_conceptual_physics": 0,
2531
+ "mmlu_econometrics": 0,
2532
+ "mmlu_electrical_engineering": 0,
2533
+ "mmlu_elementary_mathematics": 0,
2534
+ "mmlu_formal_logic": 0,
2535
+ "mmlu_global_facts": 0,
2536
+ "mmlu_high_school_biology": 0,
2537
+ "mmlu_high_school_chemistry": 0,
2538
+ "mmlu_high_school_computer_science": 0,
2539
+ "mmlu_high_school_european_history": 0,
2540
+ "mmlu_high_school_geography": 0,
2541
+ "mmlu_high_school_government_and_politics": 0,
2542
+ "mmlu_high_school_macroeconomics": 0,
2543
+ "mmlu_high_school_mathematics": 0,
2544
+ "mmlu_high_school_microeconomics": 0,
2545
+ "mmlu_high_school_physics": 0,
2546
+ "mmlu_high_school_psychology": 0,
2547
+ "mmlu_high_school_statistics": 0,
2548
+ "mmlu_high_school_us_history": 0,
2549
+ "mmlu_high_school_world_history": 0,
2550
+ "mmlu_human_aging": 0,
2551
+ "mmlu_human_sexuality": 0,
2552
+ "mmlu_humanities": 0,
2553
+ "mmlu_international_law": 0,
2554
+ "mmlu_jurisprudence": 0,
2555
+ "mmlu_logical_fallacies": 0,
2556
+ "mmlu_machine_learning": 0,
2557
+ "mmlu_management": 0,
2558
+ "mmlu_marketing": 0,
2559
+ "mmlu_medical_genetics": 0,
2560
+ "mmlu_miscellaneous": 0,
2561
+ "mmlu_moral_disputes": 0,
2562
+ "mmlu_moral_scenarios": 0,
2563
+ "mmlu_nutrition": 0,
2564
+ "mmlu_other": 0,
2565
+ "mmlu_philosophy": 0,
2566
+ "mmlu_prehistory": 0,
2567
+ "mmlu_professional_accounting": 0,
2568
+ "mmlu_professional_law": 0,
2569
+ "mmlu_professional_medicine": 0,
2570
+ "mmlu_professional_psychology": 0,
2571
+ "mmlu_public_relations": 0,
2572
+ "mmlu_security_studies": 0,
2573
+ "mmlu_social_sciences": 0,
2574
+ "mmlu_sociology": 0,
2575
+ "mmlu_stem": 0,
2576
+ "mmlu_us_foreign_policy": 0,
2577
+ "mmlu_virology": 0,
2578
+ "mmlu_world_religions": 0
2579
+ },
2580
+ "config": {
2581
+ "model": "hf",
2582
+ "model_args": "pretrained=baichuan-inc/Baichuan2-7B-Base,trust_remote_code=True,load_in_4bit=True,peft=./out/lora/p13",
2583
+ "batch_size": "16",
2584
+ "batch_sizes": [],
2585
+ "device": "cuda:0",
2586
+ "use_cache": null,
2587
+ "limit": null,
2588
+ "bootstrap_iters": 100000,
2589
+ "gen_kwargs": null
2590
+ },
2591
+ "git_hash": "dd6c6de"
2592
+ }
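The trailing `config` block above is the part worth reading closely: it records that these MMLU scores come from EleutherAI's lm-evaluation-harness, loading `baichuan-inc/Baichuan2-7B-Base` in 4-bit with the LoRA adapter from `./out/lora/p13`, batch size 16, zero-shot (see the all-zero `n-shot` map). A minimal reproduction sketch — assuming a ~0.4.x harness; the Python entry point and argument names may differ slightly at commit `dd6c6de`:

```python
# A minimal sketch of how a results file like the one above can be produced.
# Assumes lm-evaluation-harness ~0.4.x (pip install lm-eval); not code from this repo.
import json

import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args=(
        "pretrained=baichuan-inc/Baichuan2-7B-Base,"
        "trust_remote_code=True,load_in_4bit=True,peft=./out/lora/p13"
    ),
    tasks=["mmlu"],     # group task; expands to the per-subject subtasks listed above
    num_fewshot=0,      # matches the all-zero "n-shot" map
    batch_size=16,
    device="cuda:0",
)

with open("results.json", "w") as f:
    json.dump(results, f, indent=2, default=str)
```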
log.txt ADDED
The diff for this file is too large to render.
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<unk>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
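`special_tokens_map.json` declares the usual SentencePiece-style specials: `<s>` (BOS), `</s>` (EOS), and `<unk>`, with `<unk>` doubling as the pad token. `transformers` picks this file up automatically when the tokenizer is loaded; an illustrative sketch (not code shipped in this repo):

```python
# Illustrative only: special_tokens_map.json is read automatically by
# AutoTokenizer, so the specials printed below come straight from the file above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Base",  # base repo; this adapter repo ships the same tokenizer files
    trust_remote_code=True,            # BaichuanTokenizer is custom code (see auto_map below)
    use_fast=False,
)
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
# -> <s> </s> <unk> <unk>   (pad reuses <unk>)
```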
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
3
+ size 2001107
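`tokenizer.model` is stored as a Git LFS pointer rather than the binary itself; the pointer pins the blob by SHA-256 and byte size. After fetching the real file (e.g. via `git lfs pull` or `huggingface_hub`), it can be checked against the pointer:

```python
# Sanity-check a downloaded tokenizer.model against the LFS pointer above.
import hashlib
from pathlib import Path

blob = Path("tokenizer.model").read_bytes()
assert len(blob) == 2001107, "size mismatch vs. LFS pointer"
assert hashlib.sha256(blob).hexdigest() == (
    "79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2"
), "sha256 mismatch vs. LFS pointer"
print("tokenizer.model matches its LFS pointer")
```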
tokenizer_config.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "auto_map": {
5
+ "AutoTokenizer": [
6
+ "baichuan-inc/Baichuan2-7B-Base--tokenization_baichuan.BaichuanTokenizer",
7
+ null
8
+ ]
9
+ },
10
+ "bos_token": {
11
+ "__type": "AddedToken",
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": true,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "clean_up_tokenization_spaces": false,
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": true
26
+ },
27
+ "model_max_length": 4096,
28
+ "pad_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<unk>",
31
+ "lstrip": false,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": true
35
+ },
36
+ "sp_model_kwargs": {},
37
+ "tokenizer_class": "BaichuanTokenizer",
38
+ "unk_token": {
39
+ "__type": "AddedToken",
40
+ "content": "<unk>",
41
+ "lstrip": false,
42
+ "normalized": true,
43
+ "rstrip": false,
44
+ "single_word": true
45
+ },
46
+ "use_fast": false
47
+ }
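Taken together, the adapter plus these tokenizer files can be loaded the same way the evaluation above loaded them: base model in 4-bit, LoRA adapter on top. A minimal usage sketch — assumed usage, not code from this repo; `load_in_4bit` requires `bitsandbytes`:

```python
# Load the Baichuan2-7B base model in 4-bit and attach this LoRA adapter,
# mirroring the evaluation config (load_in_4bit=True, peft=./out/lora/p13).
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Base",
    trust_remote_code=True,
    load_in_4bit=True,        # requires bitsandbytes
    device_map="auto",
)
model = PeftModel.from_pretrained(base, ".")  # "." = this adapter repo, once downloaded locally

tok = AutoTokenizer.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Base", trust_remote_code=True, use_fast=False
)

inputs = tok("The study of viruses is called", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=8)
print(tok.decode(out[0], skip_special_tokens=True))
```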