Upload imatrix.log with huggingface_hub
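This commit was made with the huggingface_hub client. A minimal sketch of an equivalent upload call, assuming a hypothetical repo id and local path (neither is shown on this page; only the commit message is):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="internlm2_5-7b-chat-IMat-GGUF/imatrix.log",  # local log file; path assumed from the log's working directory
    path_in_repo="imatrix.log",
    repo_id="your-username/internlm2_5-7b-chat-IMat-GGUF",        # hypothetical repo id
    repo_type="model",
    commit_message="Upload imatrix.log with huggingface_hub",
)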
imatrix.log CHANGED (+57 -49)
@@ -1,43 +1,50 @@
-llama_model_loader: loaded meta data with
+llama_model_loader: loaded meta data with 30 key-value pairs and 291 tensors from internlm2_5-7b-chat-IMat-GGUF/internlm2_5-7b-chat.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
 llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
 llama_model_loader: - kv 0: general.architecture str = internlm2
-llama_model_loader: - kv 1: general.
-llama_model_loader: - kv 2:
-llama_model_loader: - kv 3:
-llama_model_loader: - kv 4:
-llama_model_loader: - kv 5:
-llama_model_loader: - kv 6:
-llama_model_loader: - kv 7:
-llama_model_loader: - kv 8:
-llama_model_loader: - kv 9:
-llama_model_loader: - kv 10:
-llama_model_loader: - kv 11:
-llama_model_loader: - kv 12:
-llama_model_loader: - kv 13:
-llama_model_loader: - kv 14:
-llama_model_loader: - kv 15:
-llama_model_loader: - kv 16:
-llama_model_loader: - kv 17:
-llama_model_loader: - kv 18:
-llama_model_loader: - kv 19:
-llama_model_loader: - kv 20:
-llama_model_loader: - kv 21:
-llama_model_loader: - kv 22:
-llama_model_loader: - kv 23:
+llama_model_loader: - kv 1: general.type str = model
+llama_model_loader: - kv 2: general.name str = Internlm2_5 7b Chat
+llama_model_loader: - kv 3: general.finetune str = chat
+llama_model_loader: - kv 4: general.basename str = internlm2_5
+llama_model_loader: - kv 5: general.size_label str = 7B
+llama_model_loader: - kv 6: general.license str = other
+llama_model_loader: - kv 7: general.tags arr[str,1] = ["text-generation"]
+llama_model_loader: - kv 8: internlm2.context_length u32 = 32768
+llama_model_loader: - kv 9: internlm2.block_count u32 = 32
+llama_model_loader: - kv 10: internlm2.embedding_length u32 = 4096
+llama_model_loader: - kv 11: internlm2.feed_forward_length u32 = 14336
+llama_model_loader: - kv 12: internlm2.rope.freq_base f32 = 1000000.000000
+llama_model_loader: - kv 13: internlm2.attention.head_count u32 = 32
+llama_model_loader: - kv 14: internlm2.attention.layer_norm_rms_epsilon f32 = 0.000010
+llama_model_loader: - kv 15: internlm2.attention.head_count_kv u32 = 8
+llama_model_loader: - kv 16: general.file_type u32 = 7
+llama_model_loader: - kv 17: tokenizer.ggml.model str = llama
+llama_model_loader: - kv 18: tokenizer.ggml.pre str = default
+llama_model_loader: - kv 19: tokenizer.ggml.tokens arr[str,92544] = ["<unk>", "<s>", "</s>", "<0x00>", "<...
+llama_model_loader: - kv 20: tokenizer.ggml.scores arr[f32,92544] = [-1000.000000, -1000.000000, -1000.00...
+llama_model_loader: - kv 21: tokenizer.ggml.token_type arr[i32,92544] = [3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
+llama_model_loader: - kv 22: tokenizer.ggml.add_space_prefix bool = false
+llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 1
+llama_model_loader: - kv 24: tokenizer.ggml.eos_token_id u32 = 92542
+llama_model_loader: - kv 25: tokenizer.ggml.padding_token_id u32 = 2
+llama_model_loader: - kv 26: tokenizer.ggml.add_bos_token bool = true
+llama_model_loader: - kv 27: tokenizer.ggml.add_eos_token bool = false
+llama_model_loader: - kv 28: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
+llama_model_loader: - kv 29: general.quantization_version u32 = 2
 llama_model_loader: - type f32: 65 tensors
 llama_model_loader: - type q8_0: 226 tensors
-llm_load_vocab: special tokens cache size =
-llm_load_vocab: token to piece cache size = 0.
+llm_load_vocab: special tokens cache size = 9
+llm_load_vocab: token to piece cache size = 0.5508 MB
 llm_load_print_meta: format = GGUF V3 (latest)
 llm_load_print_meta: arch = internlm2
 llm_load_print_meta: vocab type = SPM
 llm_load_print_meta: n_vocab = 92544
 llm_load_print_meta: n_merges = 0
+llm_load_print_meta: vocab_only = 0
 llm_load_print_meta: n_ctx_train = 32768
 llm_load_print_meta: n_embd = 4096
+llm_load_print_meta: n_layer = 32
 llm_load_print_meta: n_head = 32
 llm_load_print_meta: n_head_kv = 8
-llm_load_print_meta: n_layer = 32
 llm_load_print_meta: n_rot = 128
 llm_load_print_meta: n_swa = 0
 llm_load_print_meta: n_embd_head_k = 128
@@ -69,12 +76,13 @@ llm_load_print_meta: model type = 7B
 llm_load_print_meta: model ftype = Q8_0
 llm_load_print_meta: model params = 7.74 B
 llm_load_print_meta: model size = 7.66 GiB (8.50 BPW)
-llm_load_print_meta: general.name =
+llm_load_print_meta: general.name = Internlm2_5 7b Chat
 llm_load_print_meta: BOS token = 1 '<s>'
-llm_load_print_meta: EOS token = 92542 '
+llm_load_print_meta: EOS token = 92542 '<|im_end|>'
 llm_load_print_meta: UNK token = 0 '<unk>'
 llm_load_print_meta: PAD token = 2 '</s>'
 llm_load_print_meta: LF token = 13 '<0x0A>'
+llm_load_print_meta: EOT token = 92542 '<|im_end|>'
 llm_load_print_meta: max token length = 384
 ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
 ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
@@ -103,42 +111,42 @@ llama_new_context_with_model: graph splits = 2
 
 system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
 compute_imatrix: tokenizing the input ..
-compute_imatrix: tokenization took
+compute_imatrix: tokenization took 101.295 ms
 compute_imatrix: computing over 136 chunks with batch_size 512
-compute_imatrix:
-[1]5.
+compute_imatrix: 0.59 seconds per pass - ETA 1.32 minutes
+[1]5.3769,[2]3.9691,[3]3.7820,[4]4.3833,[5]4.2913,[6]3.9200,[7]4.5687,[8]4.6228,[9]5.0904,
 save_imatrix: stored collected data after 10 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[10]5.
+[10]5.2358,[11]4.8009,[12]5.1499,[13]5.7993,[14]6.1283,[15]6.6242,[16]6.8570,[17]6.5016,[18]6.7125,[19]7.0479,
 save_imatrix: stored collected data after 20 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[20]6.
+[20]6.8082,[21]6.8744,[22]7.0183,[23]7.0556,[24]7.0649,[25]7.2574,[26]7.4557,[27]7.6284,[28]7.6740,[29]7.8141,
 save_imatrix: stored collected data after 30 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[30]8.
+[30]8.0461,[31]8.1025,[32]7.8055,[33]7.4904,[34]7.1984,[35]6.9328,[36]6.7832,[37]6.6670,[38]6.5876,[39]6.5080,
 save_imatrix: stored collected data after 40 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[40]6.
+[40]6.4166,[41]6.3603,[42]6.2501,[43]6.2315,[44]6.2919,[45]6.3633,[46]6.4997,[47]6.4768,[48]6.7117,[49]6.8956,
 save_imatrix: stored collected data after 50 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[50]7.
+[50]7.0720,[51]7.2003,[52]7.3871,[53]7.2634,[54]7.3487,[55]7.4317,[56]7.5508,[57]7.4196,[58]7.4276,[59]7.4583,
 save_imatrix: stored collected data after 60 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[60]7.
+[60]7.5612,[61]7.6982,[62]7.8157,[63]7.8791,[64]7.8829,[65]7.8916,[66]7.8631,[67]7.8273,[68]7.7453,[69]7.7176,
 save_imatrix: stored collected data after 70 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[70]7.
+[70]7.7884,[71]7.8087,[72]7.7333,[73]7.6979,[74]7.7003,[75]7.6571,[76]7.6477,[77]7.6271,[78]7.6449,[79]7.5887,
 save_imatrix: stored collected data after 80 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[80]7.
+[80]7.5901,[81]7.5365,[82]7.5011,[83]7.4550,[84]7.4343,[85]7.3733,[86]7.3403,[87]7.3036,[88]7.3420,[89]7.3542,
 save_imatrix: stored collected data after 90 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[90]7.
+[90]7.3164,[91]7.3376,[92]7.3530,[93]7.3026,[94]7.2918,[95]7.2765,[96]7.3050,[97]7.3023,[98]7.3053,[99]7.2564,
 save_imatrix: stored collected data after 100 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[100]7.
+[100]7.2231,[101]7.1735,[102]7.1225,[103]7.0841,[104]7.0417,[105]7.0015,[106]6.9774,[107]6.9807,[108]7.0191,[109]7.0920,
 save_imatrix: stored collected data after 110 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[110]7.
+[110]7.1641,[111]7.2266,[112]7.3301,[113]7.3983,[114]7.4227,[115]7.4135,[116]7.4296,[117]7.4244,[118]7.4193,[119]7.3785,
 save_imatrix: stored collected data after 120 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[120]7.
+[120]7.3659,[121]7.3952,[122]7.3990,[123]7.4023,[124]7.4202,[125]7.4587,[126]7.4862,[127]7.4988,[128]7.5187,[129]7.5457,
 save_imatrix: stored collected data after 130 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
-[130]7.
+[130]7.5014,[131]7.5428,[132]7.6069,[133]7.6466,[134]7.7078,[135]7.7539,[136]7.8028,
 save_imatrix: stored collected data after 136 chunks in internlm2_5-7b-chat-IMat-GGUF/imatrix.dat
 
-llama_print_timings: load time =
+llama_print_timings: load time = 2096.79 ms
 llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: prompt eval time =
+llama_print_timings: prompt eval time = 70631.81 ms / 69632 tokens ( 1.01 ms per token, 985.84 tokens per second)
 llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: total time =
+llama_print_timings: total time = 72872.89 ms / 69633 tokens
 
-Final estimate: PPL = 7.
+Final estimate: PPL = 7.8028 +/- 0.10727
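The log above comes from llama.cpp's imatrix tool, and the importance matrix it saves (internlm2_5-7b-chat-IMat-GGUF/imatrix.dat) is typically passed to llama.cpp's quantizer to improve low-bit quants. A sketch of that step, assuming the llama-quantize binary name and a hypothetical full-precision source GGUF; only imatrix.dat's path is taken from the log:

import subprocess

# Sketch under assumptions: the binary name ("llama-quantize"; older builds call
# it "quantize"), the f16 input path, the output name, and the target type
# Q4_K_M are all hypothetical. --imatrix is llama.cpp's flag for supplying an
# importance matrix during quantization.
subprocess.run(
    [
        "./llama-quantize",
        "--imatrix", "internlm2_5-7b-chat-IMat-GGUF/imatrix.dat",
        "internlm2_5-7b-chat.f16.gguf",     # hypothetical full-precision input
        "internlm2_5-7b-chat.Q4_K_M.gguf",  # hypothetical output file
        "Q4_K_M",
    ],
    check=True,
)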