Oskar Douwe van der Wal committed on
Commit 1f2f281 · 1 Parent(s): 3da0ab7

New results

Files changed (40)
  1. pythia-410m-seed4/step100000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-10-07.222533.json +83 -0
  2. pythia-410m-seed4/step101000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-11-21.424567.json +83 -0
  3. pythia-410m-seed4/step102000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-12-39.566858.json +83 -0
  4. pythia-410m-seed4/step103000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-13-57.544058.json +83 -0
  5. pythia-410m-seed4/step104000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-15-14.702668.json +83 -0
  6. pythia-410m-seed4/step105000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-16-30.923344.json +83 -0
  7. pythia-410m-seed4/step106000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-17-47.702419.json +83 -0
  8. pythia-410m-seed4/step107000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-19-05.374648.json +83 -0
  9. pythia-410m-seed4/step108000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-20-21.666616.json +83 -0
  10. pythia-410m-seed4/step109000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-21-43.752777.json +83 -0
  11. pythia-410m-seed4/step110000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-22-59.465654.json +83 -0
  12. pythia-410m-seed4/step111000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-24-16.736732.json +83 -0
  13. pythia-410m-seed4/step112000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-25-34.394003.json +83 -0
  14. pythia-410m-seed4/step113000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-26-50.985348.json +83 -0
  15. pythia-410m-seed4/step114000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-28-07.607695.json +83 -0
  16. pythia-410m-seed4/step115000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-29-28.107074.json +83 -0
  17. pythia-410m-seed4/step116000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-30-46.095399.json +83 -0
  18. pythia-410m-seed4/step117000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-32-05.604265.json +83 -0
  19. pythia-410m-seed4/step118000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-33-25.534289.json +83 -0
  20. pythia-410m-seed4/step119000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-34-43.598284.json +83 -0
  21. pythia-410m-seed4/step120000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-36-00.802272.json +83 -0
  22. pythia-410m-seed4/step121000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-37-18.289072.json +83 -0
  23. pythia-410m-seed4/step122000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-38-37.810381.json +83 -0
  24. pythia-410m-seed4/step123000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-39-54.410053.json +83 -0
  25. pythia-410m-seed4/step124000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-41-13.186597.json +83 -0
  26. pythia-410m-seed4/step125000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-42-29.407021.json +83 -0
  27. pythia-410m-seed4/step126000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-43-47.482809.json +83 -0
  28. pythia-410m-seed4/step127000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-45-05.196071.json +83 -0
  29. pythia-410m-seed4/step128000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-46-24.606850.json +83 -0
  30. pythia-410m-seed4/step129000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-47-42.120585.json +83 -0
  31. pythia-410m-seed4/step130000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-48-59.445686.json +83 -0
  32. pythia-410m-seed4/step91000/EleutherAI__pythia-410m-seed4/results_2024-09-11T03-58-05.643985.json +83 -0
  33. pythia-410m-seed4/step92000/EleutherAI__pythia-410m-seed4/results_2024-09-11T03-59-37.043072.json +83 -0
  34. pythia-410m-seed4/step93000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-00-57.709535.json +83 -0
  35. pythia-410m-seed4/step94000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-02-16.617192.json +83 -0
  36. pythia-410m-seed4/step95000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-03-36.035116.json +83 -0
  37. pythia-410m-seed4/step96000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-04-55.889680.json +83 -0
  38. pythia-410m-seed4/step97000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-06-14.033637.json +83 -0
  39. pythia-410m-seed4/step98000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-07-31.282587.json +83 -0
  40. pythia-410m-seed4/step99000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-08-48.682453.json +83 -0
pythia-410m-seed4/step100000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-10-07.222533.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.00199699473909873,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step100000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step100000",
+ "model_sha": "fc02928b4647d9b4ed6e9e76d0113ee71e774cdf",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052955.276055,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173548.23821233,
+ "end_time": 3173619.4312918,
+ "total_evaluation_time_seconds": "71.19307947019115"
+ }
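The "config" block above records everything needed to rerun this evaluation. A minimal sketch of an equivalent run, assuming lm-evaluation-harness's 0.4.x Python API (simple_evaluate) and a CUDA device; it mirrors the recorded settings rather than the exact original command:

import json
import lm_eval

# Mirror the recorded config: hf model, one BLiMP subtask, zero-shot, batch size 128.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-410m-seed4,revision=step100000",
    tasks=["blimp_anaphor_gender_agreement"],
    num_fewshot=0,
    batch_size=128,
    device="cuda",
)
print(json.dumps(results["results"], indent=2))  # acc,none and acc_stderr,none as above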
pythia-410m-seed4/step101000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-11-21.424567.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.00199699473909873,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step101000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step101000",
+ "model_sha": "32bccb361ac58c4b97920d63586f0bdbf227204b",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053033.9447381,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173626.843071398,
+ "end_time": 3173693.631781758,
+ "total_evaluation_time_seconds": "66.78871035994962"
+ }
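Every checkpoint's file follows the same layout, so the accuracy trajectory over training steps can be read straight out of these JSONs. A minimal sketch, assuming a local clone of this repository and only the Python standard library:

import glob, json, re

# Collect (step, acc) pairs from the per-step result files.
rows = []
for path in glob.glob("pythia-410m-seed4/step*/EleutherAI__pythia-410m-seed4/results_*.json"):
    step = int(re.search(r"step(\d+)", path).group(1))
    with open(path) as f:
        data = json.load(f)
    rows.append((step, data["results"]["blimp_anaphor_gender_agreement"]["acc,none"]))

for step, acc in sorted(rows):
    print(step, acc)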
pythia-410m-seed4/step102000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-12-39.566858.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.995,
+ "acc_stderr,none": 0.002231586874844886,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step102000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step102000",
+ "model_sha": "71cf7505dffcd67c813cc8aa7e45e9cc0e05a169",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053107.9609904,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173700.977301363,
+ "end_time": 3173771.773960556,
+ "total_evaluation_time_seconds": "70.79665919300169"
+ }
pythia-410m-seed4/step103000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-13-57.544058.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.994,
+ "acc_stderr,none": 0.002443352199329816,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step103000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step103000",
+ "model_sha": "239c51bdbcd99382521cf3273589dc4596ddf7b4",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053186.083121,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2200.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173779.237803759,
+ "end_time": 3173849.751412573,
+ "total_evaluation_time_seconds": "70.51360881375149"
+ }
pythia-410m-seed4/step104000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-15-14.702668.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.995,
+ "acc_stderr,none": 0.002231586874844882,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step104000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step104000",
+ "model_sha": "e99eb8c34b10cb5f540cb3a12cce271b7aa7cd66",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053264.008169,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173857.041988569,
+ "end_time": 3173926.909441375,
+ "total_evaluation_time_seconds": "69.86745280632749"
+ }
pythia-410m-seed4/step105000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-16-30.923344.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.998,
+ "acc_stderr,none": 0.0014135055705578033,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step105000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step105000",
+ "model_sha": "55ab3c7a57bf37e3004f2ef86fcce336f22a005e",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053341.1783333,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173934.266172137,
+ "end_time": 3174003.130609353,
+ "total_evaluation_time_seconds": "68.86443721596152"
+ }
pythia-410m-seed4/step106000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-17-47.702419.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.00199699473909873,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step106000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step106000",
+ "model_sha": "e64928f8412a9cd23376f0f93532238f2157b6f8",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053417.4608786,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3174010.393538129,
+ "end_time": 3174079.909640073,
+ "total_evaluation_time_seconds": "69.51610194379464"
+ }
pythia-410m-seed4/step107000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-19-05.374648.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.995,
+ "acc_stderr,none": 0.0022315868748448804,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step107000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step107000",
+ "model_sha": "06159e6aa8d6630d1b900bac4c5adecd355498a0",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053494.2480311,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3174087.36540095,
+ "end_time": 3174157.582181613,
+ "total_evaluation_time_seconds": "70.21678066300228"
+ }
pythia-410m-seed4/step108000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-20-21.666616.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.00199699473909873,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step108000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step108000",
+ "model_sha": "1051adfd0cc49c3d50646c45f5bacee26bec6f83",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053571.8076205,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3174164.877623974,
+ "end_time": 3174233.873981624,
+ "total_evaluation_time_seconds": "68.9963576500304"
+ }
pythia-410m-seed4/step109000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-21-43.752777.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.995,
+ "acc_stderr,none": 0.0022315868748448804,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step109000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step109000",
+ "model_sha": "83b978c99aeb2e97e34254e496bb987b74aadea1",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053648.3468106,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3174241.474569528,
+ "end_time": 3174315.960057014,
+ "total_evaluation_time_seconds": "74.4854874862358"
+ }
pythia-410m-seed4/step110000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-22-59.465654.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.0019969947390987295,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step110000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step110000",
+ "model_sha": "8e0f5e533457e65e68ea1687f7d750d3517dd46f",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726053729.805304,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174323.20888256,
81
+ "end_time": 3174391.674721372,
82
+ "total_evaluation_time_seconds": "68.46583881182596"
83
+ }
pythia-410m-seed4/step111000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-24-16.736732.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.0022315868748448804,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step111000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step111000",
58
+ "model_sha": "75a5a34ee4df4bc6d6e2061f549c89f13f40a6a5",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726053806.4973712,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174399.148592579,
81
+ "end_time": 3174468.943743544,
82
+ "total_evaluation_time_seconds": "69.79515096498653"
83
+ }
pythia-410m-seed4/step112000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-25-34.394003.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.996,
5
+ "acc_stderr,none": 0.00199699473909873,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step112000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step112000",
58
+ "model_sha": "069a49005f1863a2834f6673b972821c4172a855",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726053883.4256897,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174476.310799238,
81
+ "end_time": 3174546.601122461,
82
+ "total_evaluation_time_seconds": "70.29032322298735"
83
+ }
pythia-410m-seed4/step113000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-26-50.985348.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.002231586874844883,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step113000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step113000",
58
+ "model_sha": "f852ab0193d14dd9740686cbe6167b330a3a8152",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726053960.8546288,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174554.028408953,
81
+ "end_time": 3174623.192676419,
82
+ "total_evaluation_time_seconds": "69.16426746593788"
83
+ }
pythia-410m-seed4/step114000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-28-07.607695.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.996,
5
+ "acc_stderr,none": 0.0019969947390987295,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step114000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step114000",
58
+ "model_sha": "c4442ea94d31987faa20a59fdae08f8b132831ac",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054037.8852215,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174630.444699712,
81
+ "end_time": 3174699.814589495,
82
+ "total_evaluation_time_seconds": "69.36988978274167"
83
+ }
pythia-410m-seed4/step115000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-29-28.107074.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.997,
5
+ "acc_stderr,none": 0.0017303161543469323,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step115000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step115000",
58
+ "model_sha": "43c096e6928ce1a6aaa17d3edf4f7da291866f91",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054115.7741122,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174707.593298032,
81
+ "end_time": 3174780.314431014,
82
+ "total_evaluation_time_seconds": "72.72113298205659"
83
+ }
pythia-410m-seed4/step116000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-30-46.095399.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.0022315868748448843,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step116000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step116000",
58
+ "model_sha": "0dcca3614b5b09cd28f5114af7f5a7bb81526c87",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054196.562066,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174788.067478056,
81
+ "end_time": 3174858.302606478,
82
+ "total_evaluation_time_seconds": "70.23512842180207"
83
+ }
pythia-410m-seed4/step117000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-32-05.604265.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.996,
5
+ "acc_stderr,none": 0.0019969947390987286,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step117000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step117000",
58
+ "model_sha": "568685fe8a21d938d307ff5fadd0b8a78e02bb18",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054274.2991464,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174865.930445884,
81
+ "end_time": 3174937.811637507,
82
+ "total_evaluation_time_seconds": "71.88119162293151"
83
+ }
pythia-410m-seed4/step118000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-33-25.534289.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.0022315868748448843,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step118000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step118000",
58
+ "model_sha": "d65ce4cdb47d2b01e719db535c59da7b3dcce055",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054354.5642138,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3174945.65038225,
81
+ "end_time": 3175017.742361071,
82
+ "total_evaluation_time_seconds": "72.09197882097214"
83
+ }
pythia-410m-seed4/step119000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-34-43.598284.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.997,
5
+ "acc_stderr,none": 0.0017303161543469323,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step119000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step119000",
58
+ "model_sha": "e9e67ea2f15b099bc36df94e4a46814fa6fb4d3c",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054434.088045,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175025.561456304,
81
+ "end_time": 3175095.804914532,
82
+ "total_evaluation_time_seconds": "70.24345822818577"
83
+ }
pythia-410m-seed4/step120000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-36-00.802272.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.996,
5
+ "acc_stderr,none": 0.0019969947390987295,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step120000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step120000",
58
+ "model_sha": "665c0b9763f8f4693ddbcfe1b32d1e5e8a119ef2",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054510.3431325,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175103.329386,
81
+ "end_time": 3175173.010981774,
82
+ "total_evaluation_time_seconds": "69.68159577390179"
83
+ }
pythia-410m-seed4/step121000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-37-18.289072.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.00223158687484488,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step121000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step121000",
58
+ "model_sha": "62f89475876d6b03a9a74aa9b77eb2e7fffe480e",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054589.43841,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175180.764188185,
81
+ "end_time": 3175250.496448157,
82
+ "total_evaluation_time_seconds": "69.73225997202098"
83
+ }
pythia-410m-seed4/step122000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-38-37.810381.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.002231586874844886,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step122000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step122000",
58
+ "model_sha": "3dea4c0600f10791bf3b3e7a436e1afd51e7e4d4",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054666.9414806,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175258.277447748,
81
+ "end_time": 3175330.017774218,
82
+ "total_evaluation_time_seconds": "71.74032647022977"
83
+ }
pythia-410m-seed4/step123000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-39-54.410053.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.556,
5
+ "acc_stderr,none": 0.01571976816340209,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step123000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step123000",
58
+ "model_sha": "b884f27285abb230c4503db0a0c7f0a108651fae",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054746.318988,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175337.744163536,
81
+ "end_time": 3175406.618176568,
82
+ "total_evaluation_time_seconds": "68.87401303183287"
83
+ }
pythia-410m-seed4/step124000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-41-13.186597.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.811,
5
+ "acc_stderr,none": 0.012386784588117707,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step124000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step124000",
58
+ "model_sha": "5d1de82e1c460eee1d5740dfd29b255fc319a510",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054822.87003,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175414.424328497,
81
+ "end_time": 3175485.394043215,
82
+ "total_evaluation_time_seconds": "70.9697147179395"
83
+ }
pythia-410m-seed4/step125000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-42-29.407021.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.848,
5
+ "acc_stderr,none": 0.011358918303475287,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step125000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step125000",
58
+ "model_sha": "a2b5ed072ea727629e6e42508ba5eced86c87b8d",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054901.9414382,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175493.129590383,
81
+ "end_time": 3175561.613675601,
82
+ "total_evaluation_time_seconds": "68.48408521804959"
83
+ }
pythia-410m-seed4/step126000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-43-47.482809.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.896,
5
+ "acc_stderr,none": 0.00965801621852431,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step126000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step126000",
58
+ "model_sha": "c3c0664c687842190c7cf53f7bbe10d4075f01d1",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726054978.122537,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175569.359225627,
81
+ "end_time": 3175639.69052671,
82
+ "total_evaluation_time_seconds": "70.33130108285695"
83
+ }
pythia-410m-seed4/step127000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-45-05.196071.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.936,
5
+ "acc_stderr,none": 0.007743640226919263,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step127000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step127000",
58
+ "model_sha": "b5d7ea0ce201aac314d7c22b0cbb9ab26927466f",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726055056.3555202,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175647.606272051,
81
+ "end_time": 3175717.403620297,
82
+ "total_evaluation_time_seconds": "69.79734824597836"
83
+ }
pythia-410m-seed4/step128000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-46-24.606850.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.973,
5
+ "acc_stderr,none": 0.0051280890492752884,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step128000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step128000",
58
+ "model_sha": "a1c85952b117fb4bdff91f603ba397c3c3ef9fc6",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726055133.5762315,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175725.163800638,
81
+ "end_time": 3175796.814277698,
82
+ "total_evaluation_time_seconds": "71.65047705965117"
83
+ }
pythia-410m-seed4/step129000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-47-42.120585.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.979,
5
+ "acc_stderr,none": 0.0045364721513065165,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step129000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step129000",
58
+ "model_sha": "08eb17655a620815c50ce0b79e48fc685ef51f85",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726055212.854979,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175804.624771082,
81
+ "end_time": 3175874.327507764,
82
+ "total_evaluation_time_seconds": "69.70273668225855"
83
+ }
pythia-410m-seed4/step130000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-48-59.445686.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.979,
5
+ "acc_stderr,none": 0.004536472151306531,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step130000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step130000",
58
+ "model_sha": "84492776bdca3a35bf59132706cdff9a92187353",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726055290.2834773,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3175882.18568684,
81
+ "end_time": 3175951.654583575,
82
+ "total_evaluation_time_seconds": "69.4688967349939"
83
+ }
pythia-410m-seed4/step91000/EleutherAI__pythia-410m-seed4/results_2024-09-11T03-58-05.643985.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.995,
5
+ "acc_stderr,none": 0.002231586874844886,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step91000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step91000",
58
+ "model_sha": "9f402d98396d276d31a17cb664f87902e9565f24",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726052238.8649433,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3172831.633535865,
81
+ "end_time": 3172897.849663655,
82
+ "total_evaluation_time_seconds": "66.21612778957933"
83
+ }
pythia-410m-seed4/step92000/EleutherAI__pythia-410m-seed4/results_2024-09-11T03-59-37.043072.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.996,
5
+ "acc_stderr,none": 0.00199699473909873,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step92000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step92000",
58
+ "model_sha": "53c16553dac9dedee2988ea428e99181f378db42",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726052329.781746,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
74
+ "transformers_version": "4.44.0",
75
+ "upper_git_hash": null,
76
+ "task_hashes": {},
77
+ "model_source": "hf",
78
+ "model_name": "EleutherAI/pythia-410m-seed4",
79
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
80
+ "start_time": 3172906.56788209,
81
+ "end_time": 3172989.250889342,
82
+ "total_evaluation_time_seconds": "82.68300725193694"
83
+ }
pythia-410m-seed4/step93000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-00-57.709535.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "results": {
3
+ "blimp_anaphor_gender_agreement": {
4
+ "acc,none": 0.994,
5
+ "acc_stderr,none": 0.002443352199329838,
6
+ "alias": "blimp_anaphor_gender_agreement"
7
+ }
8
+ },
9
+ "group_subtasks": {
10
+ "blimp_anaphor_gender_agreement": []
11
+ },
12
+ "configs": {
13
+ "blimp_anaphor_gender_agreement": {
14
+ "task": "blimp_anaphor_gender_agreement",
15
+ "group": "blimp",
16
+ "dataset_path": "blimp",
17
+ "dataset_name": "anaphor_gender_agreement",
18
+ "validation_split": "train",
19
+ "doc_to_text": "",
20
+ "doc_to_target": 0,
21
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "num_fewshot": 0,
26
+ "metric_list": [
27
+ {
28
+ "metric": "acc"
29
+ }
30
+ ],
31
+ "output_type": "multiple_choice",
32
+ "repeats": 1,
33
+ "should_decontaminate": true,
34
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
35
+ "metadata": {
36
+ "version": 1.0
37
+ }
38
+ }
39
+ },
40
+ "versions": {
41
+ "blimp_anaphor_gender_agreement": 1.0
42
+ },
43
+ "n-shot": {
44
+ "blimp_anaphor_gender_agreement": 0
45
+ },
46
+ "n-samples": {
47
+ "blimp_anaphor_gender_agreement": {
48
+ "original": 1000,
49
+ "effective": 1000
50
+ }
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step93000",
55
+ "model_num_parameters": 405334016,
56
+ "model_dtype": "torch.float16",
57
+ "model_revision": "step93000",
58
+ "model_sha": "c118ea2e0a5dd621f8a76bacfc01a1aba721ae4f",
59
+ "batch_size": "128",
60
+ "batch_sizes": [],
61
+ "device": "cuda",
62
+ "use_cache": null,
63
+ "limit": null,
64
+ "bootstrap_iters": 100000,
65
+ "gen_kwargs": null,
66
+ "random_seed": 0,
67
+ "numpy_seed": 1234,
68
+ "torch_seed": 1234,
69
+ "fewshot_seed": 1234
70
+ },
71
+ "git_hash": "51a7ca9",
72
+ "date": 1726052405.9039762,
73
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3172997.133740732,
+ "end_time": 3173069.917167379,
+ "total_evaluation_time_seconds": "72.78342664707452"
+ }
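Each results file added in this commit follows the same lm-evaluation-harness layout: the score sits under results.blimp_anaphor_gender_agreement ("acc,none" and "acc_stderr,none") and the checkpoint under config.model_revision. A minimal sketch for reading one of these files with the Python standard library (the path is one of the files listed above; adjust to your local checkout):

import json

# One of the result files added in this commit (local checkout path assumed).
path = "pythia-410m-seed4/step94000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-02-16.617192.json"

with open(path) as f:
    data = json.load(f)

task = "blimp_anaphor_gender_agreement"
acc = data["results"][task]["acc,none"]            # e.g. 0.997 for step94000
stderr = data["results"][task]["acc_stderr,none"]
revision = data["config"]["model_revision"]        # e.g. "step94000"
print(f"{revision}: acc = {acc:.3f} +/- {stderr:.4f}")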
pythia-410m-seed4/step94000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-02-16.617192.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.997,
+ "acc_stderr,none": 0.0017303161543469345,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step94000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step94000",
+ "model_sha": "0a7acc0e33412caacc9b10a7c5d25e159feafdb5",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052486.1947706,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173077.673712135,
+ "end_time": 3173148.824826443,
+ "total_evaluation_time_seconds": "71.15111430827528"
+ }
pythia-410m-seed4/step95000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-03-36.035116.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.00199699473909873,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step95000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step95000",
+ "model_sha": "fed2d8d3653ee436e30e7dd3e6b9c0a7de321195",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052564.9502919,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173156.630784183,
+ "end_time": 3173228.242917951,
+ "total_evaluation_time_seconds": "71.61213376838714"
+ }
pythia-410m-seed4/step96000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-04-55.889680.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.996,
+ "acc_stderr,none": 0.00199699473909873,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step96000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step96000",
+ "model_sha": "6a4ccec230d960a7c535af8a9395cdae0c185c4e",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052644.625449,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173235.965740894,
+ "end_time": 3173308.096414279,
+ "total_evaluation_time_seconds": "72.13067338522524"
+ }
pythia-410m-seed4/step97000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-06-14.033637.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.995,
+ "acc_stderr,none": 0.0022315868748448804,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step97000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step97000",
+ "model_sha": "198eed08045fbe1de5cb7d5380a37cb0c1a503f3",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052723.3401382,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173316.033475698,
+ "end_time": 3173386.240626305,
+ "total_evaluation_time_seconds": "70.20715060690418"
+ }
pythia-410m-seed4/step98000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-07-31.282587.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.997,
+ "acc_stderr,none": 0.0017303161543469323,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step98000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step98000",
+ "model_sha": "356dded64c0a1aeb6f5a76cb460083abbaed9f3a",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052800.2190914,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 2500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173393.490295854,
+ "end_time": 3173463.48977657,
+ "total_evaluation_time_seconds": "69.99948071595281"
+ }
pythia-410m-seed4/step99000/EleutherAI__pythia-410m-seed4/results_2024-09-11T04-08-48.682453.json ADDED
@@ -0,0 +1,83 @@
+ {
+ "results": {
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.997,
+ "acc_stderr,none": 0.0017303161543469362,
+ "alias": "blimp_anaphor_gender_agreement"
+ }
+ },
+ "group_subtasks": {
+ "blimp_anaphor_gender_agreement": []
+ },
+ "configs": {
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "blimp_anaphor_gender_agreement": 1.0
+ },
+ "n-shot": {
+ "blimp_anaphor_gender_agreement": 0
+ },
+ "n-samples": {
+ "blimp_anaphor_gender_agreement": {
+ "original": 1000,
+ "effective": 1000
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=EleutherAI/pythia-410m-seed4,revision=step99000",
+ "model_num_parameters": 405334016,
+ "model_dtype": "torch.float16",
+ "model_revision": "step99000",
+ "model_sha": "27580757993a72860e5fdfd4a2f0f8da5db65348",
+ "batch_size": "128",
+ "batch_sizes": [],
+ "device": "cuda",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": "51a7ca9",
+ "date": 1726052877.4549294,
+ "pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: CentOS Linux release 7.9.2009 (Core) (x86_64)\nGCC version: (GCC) 12.1.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.17\n\nPython version: 3.9.0 (default, Oct 6 2020, 11:01:41) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] (64-bit runtime)\nPython platform: Linux-3.10.0-1160.119.1.el7.tuxcare.els2.x86_64-x86_64-with-glibc2.17\nIs CUDA available: True\nCUDA runtime version: 12.4.99\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100S-PCIE-32GB\nNvidia driver version: 550.90.07\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nThread(s) per core: 1\nCore(s) per socket: 32\nSocket(s): 1\nNUMA node(s): 2\nVendor ID: AuthenticAMD\nCPU family: 23\nModel: 49\nModel name: AMD EPYC 7502P 32-Core Processor\nStepping: 0\nCPU MHz: 1500.000\nCPU max MHz: 2500.0000\nCPU min MHz: 1500.0000\nBogoMIPS: 4999.78\nVirtualization: AMD-V\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 512K\nL3 cache: 16384K\nNUMA node0 CPU(s): 0-15\nNUMA node1 CPU(s): 16-31\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid aperfmperf eagerfpu pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 cpb cat_l3 cdp_l3 hw_pstate sme ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip overflow_recov succor smca\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.4.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
+ "transformers_version": "4.44.0",
+ "upper_git_hash": null,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "EleutherAI/pythia-410m-seed4",
+ "model_name_sanitized": "EleutherAI__pythia-410m-seed4",
+ "start_time": 3173470.828815637,
+ "end_time": 3173540.882739733,
+ "total_evaluation_time_seconds": "70.05392409581691"
+ }
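The same two fields can be collected across every step*/ directory added in this commit to trace the BLiMP anaphor_gender_agreement accuracy over training. A sketch using only the standard library; the glob pattern assumes the directory layout shown in the file headers above:

import json
import re
from pathlib import Path

rows = []
# Glob pattern follows the paths listed in this commit's file headers.
for path in Path("pythia-410m-seed4").glob("step*/EleutherAI__pythia-410m-seed4/results_*.json"):
    with path.open() as f:
        data = json.load(f)
    step = int(re.sub(r"\D", "", data["config"]["model_revision"]))  # "step99000" -> 99000
    acc = data["results"]["blimp_anaphor_gender_agreement"]["acc,none"]
    rows.append((step, acc))

# Print accuracy per checkpoint, ordered by training step.
for step, acc in sorted(rows):
    print(f"step{step}\t{acc:.3f}")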