Abhaykoul committed
Commit 9daf86a · verified · 1 Parent(s): 8b223e8

Update README.md

Files changed (1)
  1. README.md +61 -51
README.md CHANGED
@@ -3,6 +3,7 @@ datasets:
 - cerebras/SlimPajama-627B
 - HuggingFaceH4/ultrachat_200k
 - bigcode/starcoderdata
+- HuggingFaceH4/ultrafeedback_binarized
 language:
 - en
 metrics:
@@ -15,56 +16,66 @@ tags:
 - Transformers
 license: mit
 widget:
-- text: "<|system|>\nYou are a chatbot who can code!</s>\n<|user|>\nWrite me a function to search for OEvortex on youtube use Webbrowser .</s>\n<|assistant|>\n"
-- text: "<|system|>\nYou are a chatbot who can be a teacher!</s>\n<|user|>\nExplain me working of AI .</s>\n<|assistant|>\n"
+- text: |
+    <|system|>
+    You are a chatbot who can code!</s>
+    <|user|>
+    Write me a function to search for OEvortex on youtube use Webbrowser .</s>
+    <|assistant|>
+- text: |
+    <|system|>
+    You are a chatbot who can be a teacher!</s>
+    <|user|>
+    Explain me working of AI .</s>
+    <|assistant|>
 model-index:
-- name: HelpingAI-Lite
-  results:
-  - task:
-      type: text-generation
-    metrics:
-    - name: Epoch
-      type: Training Epoch
-      value: 3.0
-    - name: Eval Logits/Chosen
-      type: Evaluation Logits for Chosen Samples
-      value: -2.707406759262085
-    - name: Eval Logits/Rejected
-      type: Evaluation Logits for Rejected Samples
-      value: -2.65652441978546
-    - name: Eval Logps/Chosen
-      type: Evaluation Log-probabilities for Chosen Samples
-      value: -370.129670421875
-    - name: Eval Logps/Rejected
-      type: Evaluation Log-probabilities for Rejected Samples
-      value: -296.073825390625
-    - name: Eval Loss
-      type: Evaluation Loss
-      value: 0.513750433921814
-    - name: Eval Rewards/Accuracies
-      type: Evaluation Rewards and Accuracies
-      value: 0.738095223903656
-    - name: Eval Rewards/Chosen
-      type: Evaluation Rewards for Chosen Samples
-      value: -0.0274422804903984
-    - name: Eval Rewards/Margins
-      type: Evaluation Rewards Margins
-      value: 1.008722543614307
-    - name: Eval Rewards/Rejected
-      type: Evaluation Rewards for Rejected Samples
-      value: -1.03616464138031
-    - name: Eval Runtime
-      type: Evaluation Runtime
-      value: 93.5908
-    - name: Eval Samples
-      type: Number of Evaluation Samples
-      value: 2000
-    - name: Eval Samples per Second
-      type: Evaluation Samples per Second
-      value: 21.37
-    - name: Eval Steps per Second
-      type: Evaluation Steps per Second
-      value: 0.673
+- name: HelpingAI-Lite
+  results:
+  - task:
+      type: text-generation
+    metrics:
+    - name: Epoch
+      type: Training Epoch
+      value: 3
+    - name: Eval Logits/Chosen
+      type: Evaluation Logits for Chosen Samples
+      value: -2.707406759262085
+    - name: Eval Logits/Rejected
+      type: Evaluation Logits for Rejected Samples
+      value: -2.65652441978546
+    - name: Eval Logps/Chosen
+      type: Evaluation Log-probabilities for Chosen Samples
+      value: -370.129670421875
+    - name: Eval Logps/Rejected
+      type: Evaluation Log-probabilities for Rejected Samples
+      value: -296.073825390625
+    - name: Eval Loss
+      type: Evaluation Loss
+      value: 0.513750433921814
+    - name: Eval Rewards/Accuracies
+      type: Evaluation Rewards and Accuracies
+      value: 0.738095223903656
+    - name: Eval Rewards/Chosen
+      type: Evaluation Rewards for Chosen Samples
+      value: -0.0274422804903984
+    - name: Eval Rewards/Margins
+      type: Evaluation Rewards Margins
+      value: 1.008722543614307
+    - name: Eval Rewards/Rejected
+      type: Evaluation Rewards for Rejected Samples
+      value: -1.03616464138031
+    - name: Eval Runtime
+      type: Evaluation Runtime
+      value: 93.5908
+    - name: Eval Samples
+      type: Number of Evaluation Samples
+      value: 2000
+    - name: Eval Samples per Second
+      type: Evaluation Samples per Second
+      value: 21.37
+    - name: Eval Steps per Second
+      type: Evaluation Steps per Second
+      value: 0.673
 ---
 
 # HelpingAI-Lite
@@ -124,5 +135,4 @@ prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_genera
 outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
 
 # Print the generated text
-print(outputs[0]["generated_text"])
-
+print(outputs[0]["generated_text"])
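
The widget change above is cosmetic: the quoted single-line strings are rewritten as YAML block scalars, and both forms should resolve to the same prompt text. A minimal, illustrative sketch (not part of the commit) of the first widget prompt written out as that string:

```python
# Illustrative only: the first widget prompt, spelled out the way both the old
# quoted form and the new block-scalar form should resolve (one newline after
# each role tag, trailing newline after <|assistant|>).
prompt = (
    "<|system|>\n"
    "You are a chatbot who can code!</s>\n"
    "<|user|>\n"
    "Write me a function to search for OEvortex on youtube use Webbrowser .</s>\n"
    "<|assistant|>\n"
)
print(prompt)
```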
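
The tail of the diff touches the README's usage snippet (`pipe.tokenizer.apply_chat_template`, the `pipe(...)` call, and the final `print`). For context, here is a minimal, self-contained sketch of that pattern; the repo id `OEvortex/HelpingAI-Lite`, the example messages, and the `add_generation_prompt=True` argument (truncated in the hunk header) are assumptions filled in around the lines visible in the diff, not taken from it verbatim.

```python
# A sketch of the usage pattern referenced in the last hunk, assuming the
# model lives at "OEvortex/HelpingAI-Lite" on the Hugging Face Hub.
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="OEvortex/HelpingAI-Lite",  # assumed repo id
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Chat turns in the <|system|> / <|user|> / <|assistant|> format used by the widget prompts.
messages = [
    {"role": "system", "content": "You are a chatbot who can code!"},
    {"role": "user", "content": "Write me a function to search for OEvortex on youtube use Webbrowser ."},
]

# Render the turns into a single prompt string with the model's chat template.
# add_generation_prompt=True is assumed; the diff header truncates this argument.
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Sampling settings mirror the snippet shown in the diff.
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

# Print the generated text
print(outputs[0]["generated_text"])
```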