Files changed (1)
  1. README.md +118 -1
README.md CHANGED
@@ -1,14 +1,117 @@
 ---
 language:
 - en
-library_name: transformers
 license: llama2
+library_name: transformers
 datasets:
 - pankajmathur/orca_mini_v1_dataset
 - pankajmathur/dolly-v2_orca
 - pankajmathur/WizardLM_Orca
 - pankajmathur/alpaca_orca
 - ehartford/dolphin
+model-index:
+- name: model_009
+  results:
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: AI2 Reasoning Challenge (25-Shot)
+      type: ai2_arc
+      config: ARC-Challenge
+      split: test
+      args:
+        num_few_shot: 25
+    metrics:
+    - type: acc_norm
+      value: 71.59
+      name: normalized accuracy
+    source:
+      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=psmathur/model_009
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: HellaSwag (10-Shot)
+      type: hellaswag
+      split: validation
+      args:
+        num_few_shot: 10
+    metrics:
+    - type: acc_norm
+      value: 87.7
+      name: normalized accuracy
+    source:
+      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=psmathur/model_009
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: MMLU (5-Shot)
+      type: cais/mmlu
+      config: all
+      split: test
+      args:
+        num_few_shot: 5
+    metrics:
+    - type: acc
+      value: 69.43
+      name: accuracy
+    source:
+      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=psmathur/model_009
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: TruthfulQA (0-shot)
+      type: truthful_qa
+      config: multiple_choice
+      split: validation
+      args:
+        num_few_shot: 0
+    metrics:
+    - type: mc2
+      value: 60.72
+    source:
+      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=psmathur/model_009
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: Winogrande (5-shot)
+      type: winogrande
+      config: winogrande_xl
+      split: validation
+      args:
+        num_few_shot: 5
+    metrics:
+    - type: acc
+      value: 82.32
+      name: accuracy
+    source:
+      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=psmathur/model_009
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: GSM8k (5-shot)
+      type: gsm8k
+      config: main
+      split: test
+      args:
+        num_few_shot: 5
+    metrics:
+    - type: acc
+      value: 39.42
+      name: accuracy
+    source:
+      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=psmathur/model_009
+      name: Open LLM Leaderboard
 ---
 # model_009
 
@@ -155,3 +258,17 @@ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-le
 | Winogrande (5-shot) | 82.32 |
 | GSM8K (5-shot) | 39.42 |
 | DROP (3-shot) | 44.01 |
+
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_psmathur__model_009)
+
+| Metric |Value|
+|---------------------------------|----:|
+|Avg. |68.53|
+|AI2 Reasoning Challenge (25-Shot)|71.59|
+|HellaSwag (10-Shot) |87.70|
+|MMLU (5-Shot) |69.43|
+|TruthfulQA (0-shot) |60.72|
+|Winogrande (5-shot) |82.32|
+|GSM8k (5-shot) |39.42|
+
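
The `model-index` block added in this diff is the machine-readable counterpart of the results table, so the scores round-trip through the Hub API. Below is a minimal sketch, assuming `huggingface_hub` is installed and that this card lives at `psmathur/model_009` (inferred from the `source.url` query strings in the diff, not stated elsewhere); it reads the metadata back and recomputes the `Avg.` row:

```python
# Minimal sketch (not part of this PR): read the model-index metadata added
# above and recompute the leaderboard average from it.
# Assumptions: `huggingface_hub` is installed, the Hub is reachable, and the
# card lives at psmathur/model_009 (inferred from the source URLs in the diff).
from statistics import mean

from huggingface_hub import ModelCard

card = ModelCard.load("psmathur/model_009")

# ModelCardData flattens the model-index block into EvalResult objects,
# one per (task, dataset, metric) entry, in card order.
scores = [res.metric_value for res in card.data.eval_results]

print(scores)                  # expected: [71.59, 87.7, 69.43, 60.72, 82.32, 39.42]
print(round(mean(scores), 2))  # expected: 68.53, the Avg. row of the table
```

The recomputed mean matches the table: (71.59 + 87.70 + 69.43 + 60.72 + 82.32 + 39.42) / 6 = 68.53. Note that the DROP (3-shot) score from the pre-existing table is not part of this average.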