lewtun (HF Staff) committed
Commit e991929 · verified · 1 parent: 8e82a16

Upload eval_results/open-r1/R1-Zero-Qwen-7B-Math/v04.10-step-000000315/aime24/results_2025-04-12T13-15-20.432867.json with huggingface_hub
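This commit message is the default one that huggingface_hub generates for programmatic uploads. For context, a minimal sketch of how a results file like this is typically pushed with `HfApi.upload_file`; the `repo_id` and `repo_type` below are assumptions, since the target repository is not visible in this view:

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="results_2025-04-12T13-15-20.432867.json",  # local file
    path_in_repo=(
        "eval_results/open-r1/R1-Zero-Qwen-7B-Math/"
        "v04.10-step-000000315/aime24/results_2025-04-12T13-15-20.432867.json"
    ),
    repo_id="open-r1/evals",  # assumption: actual target repo not shown here
    repo_type="dataset",      # assumption: eval results usually live in dataset repos
)
```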

eval_results/open-r1/R1-Zero-Qwen-7B-Math/v04.10-step-000000315/aime24/results_2025-04-12T13-15-20.432867.json ADDED
@@ -0,0 +1,127 @@
+ {
+   "config_general": {
+     "lighteval_sha": "?",
+     "num_fewshot_seeds": 1,
+     "override_batch_size": -1,
+     "max_samples": null,
+     "job_id": 0,
+     "start_time": 1449348.508015334,
+     "end_time": 1450512.589093624,
+     "total_evaluation_time_secondes": "1164.081078290008",
+     "model_name": "open-r1/R1-Zero-Qwen-7B-Math",
+     "model_sha": "",
+     "model_dtype": null,
+     "model_size": null,
+     "generation_parameters": {
+       "early_stopping": null,
+       "repetition_penalty": null,
+       "frequency_penalty": null,
+       "length_penalty": null,
+       "presence_penalty": null,
+       "max_new_tokens": 32768,
+       "min_new_tokens": null,
+       "seed": null,
+       "stop_tokens": null,
+       "temperature": 0.6,
+       "top_k": null,
+       "min_p": null,
+       "top_p": 0.95,
+       "truncate_prompt": null,
+       "response_format": null
+     }
+   },
+   "results": {
+     "lighteval|aime24|0": {
+       "math_pass@1:32_samples": 0.12395833333333332,
+       "math_pass@1:32_samples_stderr": 0.048400310930613616,
+       "extractive_match": 0.1,
+       "extractive_match_stderr": 0.055708601453115555
+     },
+     "all": {
+       "math_pass@1:32_samples": 0.12395833333333332,
+       "math_pass@1:32_samples_stderr": 0.048400310930613616,
+       "extractive_match": 0.1,
+       "extractive_match_stderr": 0.055708601453115555
+     }
+   },
+   "versions": {
+     "lighteval|aime24|0": 1
+   },
+   "config_tasks": {
+     "lighteval|aime24": {
+       "name": "aime24",
+       "prompt_function": "aime_prompt_fn",
+       "hf_repo": "HuggingFaceH4/aime_2024",
+       "hf_subset": "default",
+       "metric": [
+         {
+           "metric_name": "extractive_match",
+           "higher_is_better": true,
+           "category": "3",
+           "use_case": "1",
+           "sample_level_fn": "sample_level_fn",
+           "corpus_level_fn": "mean"
+         },
+         {
+           "metric_name": "math_pass@1:32_samples",
+           "higher_is_better": true,
+           "category": "5",
+           "use_case": "6",
+           "sample_level_fn": "compute",
+           "corpus_level_fn": "mean"
+         }
+       ],
+       "hf_revision": null,
+       "hf_filter": null,
+       "hf_avail_splits": [
+         "train"
+       ],
+       "trust_dataset": false,
+       "evaluation_splits": [
+         "train"
+       ],
+       "few_shots_split": null,
+       "few_shots_select": null,
+       "generation_size": 32768,
+       "generation_grammar": null,
+       "stop_sequence": [],
+       "num_samples": null,
+       "suite": [
+         "lighteval"
+       ],
+       "original_num_docs": 30,
+       "effective_num_docs": 30,
+       "must_remove_duplicate_docs": false,
+       "version": 1
+     }
+   },
+   "summary_tasks": {
+     "lighteval|aime24|0": {
+       "hashes": {
+         "hash_examples": "34f73561248f264c",
+         "hash_full_prompts": "bc5bcea127c57fb8",
+         "hash_input_tokens": "8d44aeb2039ae950",
+         "hash_cont_tokens": "9f18fbb8f60e40ef"
+       },
+       "truncated": 0,
+       "non_truncated": 60,
+       "padded": 0,
+       "non_padded": 60,
+       "effective_few_shots": 0.0,
+       "num_truncated_few_shots": 0
+     }
+   },
+   "summary_general": {
+     "hashes": {
+       "hash_examples": "772678c254b88603",
+       "hash_full_prompts": "ce17217291012a57",
+       "hash_input_tokens": "191b20d776c281b9",
+       "hash_cont_tokens": "a6968d36c504a7b4"
+     },
+     "truncated": 0,
+     "non_truncated": 60,
+     "padded": 0,
+     "non_padded": 60,
+     "num_truncated_few_shots": 0
+   }
+ }
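For readers of this report: the run sampled 32 completions per problem at temperature 0.6 and top_p 0.95 (up to 32768 new tokens), and the headline metrics sit under results → "lighteval|aime24|0". Below is a small sketch that reads them back and illustrates how a pass@1-over-n-samples score aggregates (for k=1 the unbiased pass@k estimator reduces to correct/n per problem, averaged over problems); the per-problem counts in the example are invented, not the real data:

```python
import json

# Load a local copy of the results JSON shown above.
with open("results_2025-04-12T13-15-20.432867.json") as f:
    report = json.load(f)

metrics = report["results"]["lighteval|aime24|0"]
print(f"pass@1 (avg over 32 samples): {metrics['math_pass@1:32_samples']:.4f}")
print(f"extractive match:             {metrics['extractive_match']:.4f}")

# pass@1 with n samples per problem is the mean of (correct / n) across problems.
def pass_at_1(correct_counts, n_samples=32):
    return sum(c / n_samples for c in correct_counts) / len(correct_counts)

# Invented counts for 5 of AIME24's 30 problems, purely to show the shape.
print(pass_at_1([4, 0, 32, 0, 8]))
```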