Oskar Douwe van der Wal committed
Commit 0ed1e40 · 1 Parent(s): 1f2f281

New results

pythia-410m-seed4/step131000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-50-15.500953.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.983,
+ "acc_stderr,none": 0.004089954489689093,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step131000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step131000",
+ "model_sha": "ff66aa37de6d137d8ce0761506e2e77477a11323",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055367.053396,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3175959.544550214,
+ "end_time": 3176027.708404874,
+ "total_evaluation_time_seconds": "68.16385466000065"
+ }
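
The config block above records a zero-shot lm-evaluation-harness run of BLiMP anaphor_gender_agreement on a Pythia-410m (seed 4) checkpoint. A minimal sketch of how a comparable run could be launched from Python is shown below; it assumes a recent lm-eval release, and keyword names may differ between harness versions.

```python
# Sketch only: reproduce one checkpoint's BLiMP evaluation with lm-evaluation-harness.
# Assumes `pip install lm_eval`; exact API surface may vary by version.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-410m-seed4,revision=step131000",
    tasks=["blimp_anaphor_gender_agreement"],
    num_fewshot=0,
    batch_size=128,
    device="cuda",
)

# The JSON files in this commit store the same structure under "results".
print(results["results"]["blimp_anaphor_gender_agreement"]["acc,none"])
```
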
pythia-410m-seed4/step132000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-51-51.556038.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.99,
+ "acc_stderr,none": 0.003148000938676753,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step132000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step132000",
+ "model_sha": "f0271f813393ae3c9483d19c1602c68c45aaaef3",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055462.0212724,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176036.259022766,
+ "end_time": 3176123.763345007,
+ "total_evaluation_time_seconds": "87.50432224106044"
+ }
pythia-410m-seed4/step133000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-53-03.726218.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.99,
+ "acc_stderr,none": 0.0031480009386767763,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step133000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step133000",
+ "model_sha": "f1dee9051011ae3b7bd191a82db07bb8c71cb702",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055537.8769426,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176131.064966063,
+ "end_time": 3176195.932225916,
+ "total_evaluation_time_seconds": "64.86725985305384"
+ }
pythia-410m-seed4/step134000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-54-18.255322.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.989,
+ "acc_stderr,none": 0.0032999833166078166,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step134000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step134000",
+ "model_sha": "e354737e2fd7ba0f1673dbfbe4bf34a272f50e28",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055610.262466,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176203.242230205,
+ "end_time": 3176270.461175652,
+ "total_evaluation_time_seconds": "67.21894544735551"
+ }
pythia-410m-seed4/step135000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-55-33.700012.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.985,
+ "acc_stderr,none": 0.003845749574503007,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step135000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step135000",
+ "model_sha": "54a96eca627d899c31f0683f624d70bc8a6cfc89",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055685.3841197,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176277.804626787,
+ "end_time": 3176345.907884312,
+ "total_evaluation_time_seconds": "68.10325752478093"
+ }
pythia-410m-seed4/step136000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-56-49.802270.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.988,
+ "acc_stderr,none": 0.003444977194099855,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step136000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step136000",
+ "model_sha": "5c19f7278799fea0c8fa1c5f8f53b383331cefae",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055760.128607,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176353.262396983,
+ "end_time": 3176422.009591882,
+ "total_evaluation_time_seconds": "68.7471948992461"
+ }
pythia-410m-seed4/step137000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-58-03.950399.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.992,
+ "acc_stderr,none": 0.0028185003005045057,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step137000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step137000",
+ "model_sha": "d65308aeab729c0d23df7017aaa2cbe539b2c123",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055835.8480852,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176429.231099293,
+ "end_time": 3176496.157780495,
+ "total_evaluation_time_seconds": "66.92668120190501"
+ }
pythia-410m-seed4/step138000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-59-19.595966.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.988,
+ "acc_stderr,none": 0.0034449771940998058,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step138000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step138000",
+ "model_sha": "ad1c86215d4f4a64469b0d3dcf38f0d4481b431b",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055911.8370852,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176505.060849075,
+ "end_time": 3176571.803014195,
+ "total_evaluation_time_seconds": "66.74216511985287"
+ }
pythia-410m-seed4/step139000/EleutherAI__pythia-410m-seed4/results_2024-09-11T05-00-36.665011.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.997,
+ "acc_stderr,none": 0.0017303161543469332,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step139000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step139000",
+ "model_sha": "50ee69ef520685f3a74c034244aed007bc4ef038",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726055988.3239312,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2200.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176579.599937829,
+ "end_time": 3176648.871872197,
+ "total_evaluation_time_seconds": "69.27193436771631"
+ }
pythia-410m-seed4/step140000/EleutherAI__pythia-410m-seed4/results_2024-09-11T05-01-56.026923.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.986,
+ "acc_stderr,none": 0.0037172325482565504,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step140000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step140000",
+ "model_sha": "74147cd096fbd6ee7748236c27b898ce08810da6",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726056065.355573,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2200.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3176656.904097611,
+ "end_time": 3176728.234131931,
+ "total_evaluation_time_seconds": "71.33003431977704"
+ }
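
Taken together, the ten files added in this commit track BLiMP anaphor_gender_agreement accuracy for pythia-410m-seed4 across checkpoints step131000 through step140000, with accuracy between 0.983 and 0.997. The stdlib-only sketch below shows one way to collect those numbers from the result files; the glob pattern is an assumption based on the paths above and may need adjusting to the actual repository layout.

```python
# Sketch: collect per-checkpoint accuracy from the result files added in this
# commit. Standard library only; the glob pattern mirrors the paths shown above.
import json
import re
from pathlib import Path

rows = []
for path in Path(".").glob(
    "pythia-410m-seed4/step*/EleutherAI__pythia-410m-seed4/results_*.json"
):
    data = json.loads(path.read_text())
    metrics = data["results"]["blimp_anaphor_gender_agreement"]
    step = int(re.search(r"step(\d+)", data["config"]["model_revision"]).group(1))
    rows.append((step, metrics["acc,none"], metrics["acc_stderr,none"]))

# Print accuracy (and its standard error) in checkpoint order.
for step, acc, err in sorted(rows):
    print(f"step{step}: acc={acc:.3f} +/- {err:.4f}")
```
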