elichen3051 committed (verified)
Commit 2f29375 · 1 Parent(s): 3def924

Adding aggregated results for meta-llama/Llama-3.2-1B-Instruct
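The file below follows the output schema of EleutherAI's lm-evaluation-harness. As a rough sketch only (the exact call is an assumption inferred from the "config" block of the committed file, not part of this commit), a comparable zero-shot OpenBookQA run could be launched through the harness's Python entry point:

# Sketch, assuming EleutherAI's lm-evaluation-harness (pip install lm-eval, 0.4.x)
# and its simple_evaluate entry point; arguments mirror the committed "config"
# block (model=hf, pretrained=meta-llama/Llama-3.2-1B-Instruct, 0-shot, auto:4).
import json
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=meta-llama/Llama-3.2-1B-Instruct",
    tasks=["openbookqa"],
    num_fewshot=0,
    batch_size="auto:4",
)

# Print the aggregated openbookqa metrics for comparison with this commit.
print(json.dumps(results["results"]["openbookqa"], indent=2))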

meta-llama/Llama-3.2-1B-Instruct/results_2025-01-03T14-33-28.476520.json ADDED
@@ -0,0 +1,123 @@
+ {
+   "results": {
+     "openbookqa": {
+       "alias": "openbookqa",
+       "acc,none": 0.244,
+       "acc_stderr,none": 0.019226734893614636,
+       "acc_norm,none": 0.342,
+       "acc_norm_stderr,none": 0.021236147199899316
+     }
+   },
+   "group_subtasks": {
+     "openbookqa": []
+   },
+   "configs": {
+     "openbookqa": {
+       "task": "openbookqa",
+       "dataset_path": "openbookqa",
+       "dataset_name": "main",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "question_stem",
+       "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "question_stem",
+       "metadata": {
+         "version": 1.0
+       }
+     }
+   },
+   "versions": {
+     "openbookqa": 1.0
+   },
+   "n-shot": {
+     "openbookqa": 0
+   },
+   "higher_is_better": {
+     "openbookqa": {
+       "acc": true,
+       "acc_norm": true
+     }
+   },
+   "n-samples": {
+     "openbookqa": {
+       "original": 500,
+       "effective": 500
+     }
+   },
+   "config": {
+     "model": "hf",
+     "model_args": "pretrained=meta-llama/Llama-3.2-1B-Instruct",
+     "model_num_parameters": 1235814400,
+     "model_dtype": "torch.bfloat16",
+     "model_revision": "main",
+     "model_sha": "9213176726f574b556790deb65791e0c5aa438b6",
+     "batch_size": "auto:4",
+     "batch_sizes": [
+       64,
+       64,
+       64,
+       64
+     ],
+     "device": null,
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000,
+     "gen_kwargs": null,
+     "random_seed": 0,
+     "numpy_seed": 1234,
+     "torch_seed": 1234,
+     "fewshot_seed": 1234
+   },
+   "git_hash": null,
+   "date": 1735885993.394094,
+   "pretty_env_info": "'NoneType' object has no attribute 'splitlines'",
+   "transformers_version": "4.47.1",
+   "upper_git_hash": null,
+   "tokenizer_pad_token": [
+     "<|eot_id|>",
+     "128009"
+   ],
+   "tokenizer_eos_token": [
+     "<|eot_id|>",
+     "128009"
+   ],
+   "tokenizer_bos_token": [
+     "<|begin_of_text|>",
+     "128000"
+   ],
+   "eot_token_id": 128009,
+   "max_length": 131072,
+   "task_hashes": {},
+   "model_source": "hf",
+   "model_name": "meta-llama/Llama-3.2-1B-Instruct",
+   "model_name_sanitized": "meta-llama__Llama-3.2-1B-Instruct",
+   "system_instruction": null,
+   "system_instruction_sha": null,
+   "fewshot_as_multiturn": false,
+   "chat_template": null,
+   "chat_template_sha": null,
+   "start_time": 579584.971908556,
+   "end_time": 579604.088721181,
+   "total_evaluation_time_seconds": "19.116812625085004"
+ }
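The committed JSON can be consumed directly with the standard library; a minimal sketch (the file path is the one added in this commit) for pulling out the headline metrics:

# Minimal sketch: read the aggregated results file added in this commit and
# report accuracy / normalized accuracy with their standard errors.
import json

path = "meta-llama/Llama-3.2-1B-Instruct/results_2025-01-03T14-33-28.476520.json"
with open(path) as f:
    data = json.load(f)

scores = data["results"]["openbookqa"]
print(f"openbookqa acc      = {scores['acc,none']:.3f} ± {scores['acc_stderr,none']:.3f}")
print(f"openbookqa acc_norm = {scores['acc_norm,none']:.3f} ± {scores['acc_norm_stderr,none']:.3f}")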