yujiepan committed
Commit 6110f58 · verified · 1 Parent(s): 5044344

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,652 @@
+ ---
+ library_name: transformers
+ pipeline_tag: image-text-to-text
+ inference: true
+ widget:
+ - text: Hello!
+   example_title: Hello world
+   group: Python
+ base_model:
+ - google/gemma-3n-E4B-it
+ ---
+
+ This tiny model is for debugging. It is randomly initialized with the config adapted from [google/gemma-3n-E4B-it](https://huggingface.co/google/gemma-3n-E4B-it).
+
+ ### Example usage:
+
+ ```python
+ import torch
+
+ from transformers import pipeline
+
+ model_id = "tiny-random/gemma-3n"
+ pipe = pipeline(
+     task="image-text-to-text",
+     model=model_id,
+     device=0,
+     torch_dtype=torch.bfloat16
+ )
+
+ # temporary patch for audio tower
+ from accelerate.hooks import ModelHook, add_hook_to_module
+
+ class EnsureDtype(ModelHook):
+     def pre_forward(self, module, *args, **kwargs):
+         args = list(args)
+         args[0] = args[0].to(module.dtype)
+         return super().pre_forward(module, *args, **kwargs)
+ add_hook_to_module(pipe.model.audio_tower, EnsureDtype())
+
+ messages = [
+     {
+         "role": "system",
+         "content": [
+             {"type": "text", "text": "You are a helpful assistant."}
+         ]
+     },
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
+             # audio is buggy for now: bf16 x fp32
+             {"type": "audio", "url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Audio/glass-breaking-151256.mp3"},
+             {"type": "text", "text": "Which image is cuter?"},
+         ]
+     },
+ ]
+ result = pipe(messages, min_new_tokens=512, max_new_tokens=512, do_sample=True)
+ print(result)
+ ```
+
+ ### Code to create this repo:
+
+ ```python
+ import json
+ from pathlib import Path
+
+ import torch
+
+ import accelerate
+ from huggingface_hub import file_exists, hf_hub_download
+ from timm.models.mobilenetv5 import decode_arch_def
+ from transformers import (
+     AutoConfig,
+     AutoModelForCausalLM,
+     AutoProcessor,
+     AutoTokenizer,
+     Gemma3nForConditionalGeneration,
+     GenerationConfig,
+     set_seed,
+ )
+
+ source_model_id = "google/gemma-3n-E4B-it"
+ save_folder = "/tmp/tiny-random/gemma-3n"
+
+ processor = AutoProcessor.from_pretrained(source_model_id)
+ processor.save_pretrained(save_folder)
+
+ with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
+     config_json = json.load(f)
+
+ config_json['audio_config'].update({
+     "conf_num_attention_heads": 2,
+     "conf_num_hidden_layers": 2,
+     "hidden_size": 64,
+ })
+ config_json['text_config'].update({
+     "activation_sparsity_pattern": [0.95, 0.95, 0.0, 0.0],
+     "head_dim": 32,  # required by vllm
+     "hidden_size": 32,
+     "hidden_size_per_layer_input": 2,
+     "intermediate_size": 64,
+     "laurel_rank": 8,
+     "layer_types": ['sliding_attention', 'full_attention', 'sliding_attention', 'full_attention'],
+     "num_attention_heads": 1,
+     "num_hidden_layers": 4,
+     "num_key_value_heads": 1,
+     "num_kv_shared_layers": 2,
+     "sliding_window": 512,
+ })
+ block_args = decode_arch_def(
+     [
+         # Stage 0: 128x128 in
+         [
+             'er_r1_k3_s2_e4_c32',
+             'er_r1_k3_s1_e4_c32',
+         ],
+         # Stage 1: 256x256 in
+         [
+             'uir_r1_a3_k5_s2_e6_c32',
+             'uir_r1_a5_k0_s1_e4_c32',
+             'uir_r1_a3_k0_s1_e4_c32',
+         ],
+         # Stage 2: 640x640 in
+         [
+             "uir_r1_a5_k5_s2_e6_c32",
+             "uir_r1_a0_k0_s1_e1_c32",
+             "mqa_r1_k3_h2_v2_s1_d64_c32",
+             "uir_r1_a0_k0_s1_e2_c32",
+         ],
+         # Stage 3: 1280x1280 in
+         [
+             "uir_r1_a5_k5_s2_e6_c32",
+             "mqa_r1_k3_h2_s1_d64_c32",
+             "uir_r1_a0_k0_s1_e2_c32",
+         ],
+     ]
+ )
+ config_json['vision_config'].update({
+     "hidden_size": 2048,  # hard-coded in timm
+     "model_args": {
+         "block_args": block_args,
+     }
+ })
+
+ with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
+     json.dump(config_json, f, indent=2)
+
+ config = AutoConfig.from_pretrained(
+     save_folder,
+     trust_remote_code=True,
+ )
+ print(config)
+
+ torch.set_default_dtype(torch.bfloat16)
+ model = Gemma3nForConditionalGeneration(config)
+ torch.set_default_dtype(torch.float32)
+ if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
+     model.generation_config = GenerationConfig.from_pretrained(
+         source_model_id, trust_remote_code=True,
+     )
+ set_seed(42)
+ model = model.cpu()
+ all_numels = 0
+ for name, p in sorted(model.named_parameters()):
+     all_numels += p.numel()
+ with torch.no_grad():
+     for name, p in sorted(model.named_parameters()):
+         torch.nn.init.normal_(p, 0, 0.2)
+         print(name, p.shape, f'{p.numel() / all_numels * 100: .4f}%')
+ model.save_pretrained(save_folder)
+ ```
+
+ ### Printing the model:
+
+ ```text
+ Gemma3nForConditionalGeneration(
+   (model): Gemma3nModel(
+     (vision_tower): TimmWrapperModel(
+       (timm_model): MobileNetV5Encoder(
+         (conv_stem): ConvNormAct(
+           (conv): Conv2dSame(3, 64, kernel_size=(3, 3), stride=(2, 2), bias=False)
+           (bn): RmsNormAct2d(
+             (drop): Identity()
+             (act): GELU(approximate='none')
+           )
+         )
+         (blocks): Sequential(
+           (0): Sequential(
+             (0): EdgeResidual(
+               (conv_exp): Conv2dSame(64, 256, kernel_size=(3, 3), stride=(2, 2), bias=False)
+               (bn1): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): GELU(approximate='none')
+               )
+               (aa): Identity()
+               (se): Identity()
+               (conv_pwl): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+               (bn2): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): Identity()
+               )
+               (drop_path): Identity()
+             )
+             (1): EdgeResidual(
+               (conv_exp): Conv2d(32, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+               (bn1): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): GELU(approximate='none')
+               )
+               (aa): Identity()
+               (se): Identity()
+               (conv_pwl): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+               (bn2): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): Identity()
+               )
+               (drop_path): Identity()
+             )
+           )
+           (1): Sequential(
+             (0): UniversalInvertedResidual(
+               (dw_start): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): ConvNormAct(
+                 (conv): Conv2dSame(192, 192, kernel_size=(5, 5), stride=(2, 2), groups=192, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (1): UniversalInvertedResidual(
+               (dw_start): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=32, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): Identity()
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (2): UniversalInvertedResidual(
+               (dw_start): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): Identity()
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+           )
+           (2): Sequential(
+             (0): UniversalInvertedResidual(
+               (dw_start): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=32, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): ConvNormAct(
+                 (conv): Conv2dSame(192, 192, kernel_size=(5, 5), stride=(2, 2), groups=192, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (1): UniversalInvertedResidual(
+               (dw_start): Identity()
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): Identity()
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (2): MobileAttention(
+               (norm): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): Identity()
+               )
+               (attn): MultiQueryAttention2d(
+                 (query): Sequential(
+                   (proj): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 )
+                 (key): Sequential(
+                   (down_conv): Conv2dSame(32, 32, kernel_size=(3, 3), stride=(2, 2), groups=32, bias=False)
+                   (norm): RmsNorm2d()
+                   (proj): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 )
+                 (value): Sequential(
+                   (down_conv): Conv2dSame(32, 32, kernel_size=(3, 3), stride=(2, 2), groups=32, bias=False)
+                   (norm): RmsNorm2d()
+                   (proj): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 )
+                 (attn_drop): Dropout(p=0.0, inplace=False)
+                 (output): Sequential(
+                   (proj): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                   (drop): Dropout(p=0.0, inplace=False)
+                 )
+               )
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (3): UniversalInvertedResidual(
+               (dw_start): Identity()
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): Identity()
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+           )
+           (3): Sequential(
+             (0): UniversalInvertedResidual(
+               (dw_start): ConvNormAct(
+                 (conv): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=32, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): ConvNormAct(
+                 (conv): Conv2dSame(192, 192, kernel_size=(5, 5), stride=(2, 2), groups=192, bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (1): MobileAttention(
+               (norm): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): Identity()
+               )
+               (attn): MultiQueryAttention2d(
+                 (query): Sequential(
+                   (proj): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 )
+                 (key): Sequential(
+                   (proj): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 )
+                 (value): Sequential(
+                   (proj): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 )
+                 (attn_drop): Dropout(p=0.0, inplace=False)
+                 (output): Sequential(
+                   (proj): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                   (drop): Dropout(p=0.0, inplace=False)
+                 )
+               )
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+             (2): UniversalInvertedResidual(
+               (dw_start): Identity()
+               (pw_exp): ConvNormAct(
+                 (conv): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): GELU(approximate='none')
+                 )
+               )
+               (dw_mid): Identity()
+               (se): Identity()
+               (pw_proj): ConvNormAct(
+                 (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
+                 (bn): RmsNormAct2d(
+                   (drop): Identity()
+                   (act): Identity()
+                 )
+               )
+               (dw_end): Identity()
+               (layer_scale): LayerScale2d()
+               (drop_path): Identity()
+             )
+           )
+         )
+         (msfa): MobileNetV5MultiScaleFusionAdapter(
+           (ffn): UniversalInvertedResidual(
+             (dw_start): Identity()
+             (pw_exp): ConvNormAct(
+               (conv): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+               (bn): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): GELU(approximate='none')
+               )
+             )
+             (dw_mid): Identity()
+             (se): Identity()
+             (pw_proj): ConvNormAct(
+               (conv): Conv2d(128, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
+               (bn): RmsNormAct2d(
+                 (drop): Identity()
+                 (act): Identity()
+               )
+             )
+             (dw_end): Identity()
+             (layer_scale): Identity()
+             (drop_path): Identity()
+           )
+           (norm): RmsNorm2d()
+         )
+       )
+     )
+     (language_model): Gemma3nTextModel(
+       (embed_tokens): Gemma3nTextScaledWordEmbedding(262400, 32, padding_idx=0)
+       (layers): ModuleList(
+         (0-3): 4 x Gemma3nTextDecoderLayer(
+           (self_attn): Gemma3nTextAttention(
+             (q_proj): Linear(in_features=32, out_features=32, bias=False)
+             (k_proj): Linear(in_features=32, out_features=32, bias=False)
+             (v_proj): Linear(in_features=32, out_features=32, bias=False)
+             (o_proj): Linear(in_features=32, out_features=32, bias=False)
+             (q_norm): Gemma3nRMSNorm((32,), eps=1e-06)
+             (k_norm): Gemma3nRMSNorm((32,), eps=1e-06)
+             (v_norm): Gemma3nRMSNorm((), eps=1e-06)
+           )
+           (mlp): Gemma3nTextMLP(
+             (gate_proj): Linear(in_features=32, out_features=64, bias=False)
+             (up_proj): Linear(in_features=32, out_features=64, bias=False)
+             (down_proj): Linear(in_features=64, out_features=32, bias=False)
+             (act_fn): PytorchGELUTanh()
+           )
+           (input_layernorm): Gemma3nRMSNorm((32,), eps=1e-06)
+           (post_attention_layernorm): Gemma3nRMSNorm((32,), eps=1e-06)
+           (pre_feedforward_layernorm): Gemma3nRMSNorm((32,), eps=1e-06)
+           (post_feedforward_layernorm): Gemma3nRMSNorm((32,), eps=1e-06)
+           (act_fn): PytorchGELUTanh()
+           (altup): Gemma3nTextAltUp(
+             (correction_coefs): Linear(in_features=4, out_features=4, bias=False)
+             (prediction_coefs): Linear(in_features=4, out_features=16, bias=False)
+             (modality_router): Linear(in_features=32, out_features=4, bias=False)
+             (router_norm): Gemma3nRMSNorm((32,), eps=1e-06)
+           )
+           (laurel): Gemma3nTextLaurelBlock(
+             (linear_left): Linear(in_features=32, out_features=8, bias=False)
+             (linear_right): Linear(in_features=8, out_features=32, bias=False)
+             (post_laurel_norm): Gemma3nRMSNorm((32,), eps=1e-06)
+           )
+           (per_layer_input_gate): Linear(in_features=32, out_features=2, bias=False)
+           (per_layer_projection): Linear(in_features=2, out_features=32, bias=False)
+           (post_per_layer_input_norm): Gemma3nRMSNorm((32,), eps=1e-06)
+         )
+       )
+       (norm): Gemma3nRMSNorm((32,), eps=1e-06)
+       (rotary_emb): Gemma3nTextRotaryEmbedding()
+       (rotary_emb_local): Gemma3nTextRotaryEmbedding()
+       (embed_tokens_per_layer): Gemma3nTextScaledWordEmbedding(262144, 8, padding_idx=0)
+       (per_layer_model_projection): Linear(in_features=32, out_features=8, bias=False)
+       (per_layer_projection_norm): Gemma3nRMSNorm((2,), eps=1e-06)
+       (altup_projections): ModuleList(
+         (0-2): 3 x Linear(in_features=32, out_features=32, bias=False)
+       )
+       (altup_unembed_projections): ModuleList(
+         (0-2): 3 x Linear(in_features=32, out_features=32, bias=False)
+       )
+     )
+     (audio_tower): Gemma3nAudioEncoder(
+       (subsample_conv_projection): Gemma3nAudioSubSampleConvProjection(
+         (conv_0): Gemma3nAudioSSCPConvBlock(
+           (conv): Conv2d(1, 128, kernel_size=(3, 3), stride=(2, 2), bias=False)
+           (norm): Gemma3nAudioCumulativeGroupNorm()
+           (activation): ReLU()
+         )
+         (conv_1): Gemma3nAudioSSCPConvBlock(
+           (conv): Conv2d(128, 32, kernel_size=(3, 3), stride=(2, 2), bias=False)
+           (norm): Gemma3nAudioCumulativeGroupNorm()
+           (activation): ReLU()
+         )
+         (input_proj_linear): Linear(in_features=1024, out_features=64, bias=False)
+       )
+       (conformer): ModuleList(
+         (0-1): 2 x Gemma3nAudioConformerBlock(
+           (ffw_layer_start): Gemma3nAudioConformerFeedForward(
+             (pre_layer_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+             (ffw_layer_1): Linear(in_features=64, out_features=256, bias=False)
+             (ffw_layer_2): Linear(in_features=256, out_features=64, bias=False)
+             (post_layer_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+           )
+           (attention): Gemma3nAudioConformerAttention(
+             (pre_attn_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+             (attn): Gemma3nAudioAttention(
+               (relative_position_embedding): Gemma3nAudioRelativePositionEmbedding(
+                 (pos_proj): Linear(in_features=64, out_features=64, bias=False)
+               )
+               (q_proj): Linear(in_features=64, out_features=64, bias=False)
+               (k_proj): Linear(in_features=64, out_features=64, bias=False)
+               (v_proj): Linear(in_features=64, out_features=64, bias=False)
+             )
+             (post): Linear(in_features=64, out_features=64, bias=False)
+             (post_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+           )
+           (lconv1d): Gemma3nAudioConformerLightConv1d(
+             (pre_layer_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+             (linear_start): Linear(in_features=64, out_features=128, bias=False)
+             (depthwise_conv1d): Conv1d(64, 64, kernel_size=(5,), stride=(1,), groups=64, bias=False)
+             (conv_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+             (linear_end): Linear(in_features=64, out_features=64, bias=False)
+           )
+           (ffw_layer_end): Gemma3nAudioConformerFeedForward(
+             (pre_layer_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+             (ffw_layer_1): Linear(in_features=64, out_features=256, bias=False)
+             (ffw_layer_2): Linear(in_features=256, out_features=64, bias=False)
+             (post_layer_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+           )
+           (norm): Gemma3nRMSNorm((64,), eps=1e-06)
+         )
+       )
+     )
+     (embed_vision): Gemma3nMultimodalEmbedder(
+       (embedding): Embedding(128, 2048)
+       (hard_embedding_norm): Gemma3nRMSNorm((2048,), eps=1e-06)
+       (soft_embedding_norm): Gemma3nRMSNorm((2048,), eps=1e-06)
+       (embedding_projection): Linear(in_features=2048, out_features=32, bias=False)
+       (embedding_post_projection_norm): Gemma3nRMSNorm((), eps=1e-06)
+     )
+     (embed_audio): Gemma3nMultimodalEmbedder(
+       (embedding): Embedding(128, 64)
+       (hard_embedding_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+       (soft_embedding_norm): Gemma3nRMSNorm((64,), eps=1e-06)
+       (embedding_projection): Linear(in_features=64, out_features=32, bias=False)
+       (embedding_post_projection_norm): Gemma3nRMSNorm((), eps=1e-06)
+     )
+   )
+   (lm_head): Linear(in_features=32, out_features=262400, bias=False)
+ )
+ ```
chat_template.jinja ADDED
@@ -0,0 +1,49 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+     {%- if messages[0]['content'] is string -%}
+         {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+     {%- else -%}
+         {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+     {%- endif -%}
+     {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+     {%- set first_user_prefix = "" -%}
+     {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+     {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+         {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+     {%- endif -%}
+     {%- if (message['role'] == 'assistant') -%}
+         {%- set role = "model" -%}
+     {%- else -%}
+         {%- set role = message['role'] -%}
+     {%- endif -%}
+     {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+     {%- if message['content'] is string -%}
+         {{ message['content'] | trim }}
+     {%- elif message['content'] is iterable -%}
+         {%- for item in message['content'] -%}
+             {%- if item['type'] == 'audio' -%}
+                 {{ '<audio_soft_token>' }}
+             {%- elif item['type'] == 'image' -%}
+                 {{ '<image_soft_token>' }}
+             {%- elif item['type'] == 'text' -%}
+                 {{ item['text'] | trim }}
+             {%- endif -%}
+         {%- endfor -%}
+     {%- else -%}
+         {{ raise_exception("Invalid content type") }}
+     {%- endif -%}
+     {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+     {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
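For reference, the template above can be exercised without running the model. The following is a minimal sketch, assuming the hub id `tiny-random/gemma-3n` from the README (the exact rendered whitespace depends on the template):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("tiny-random/gemma-3n")
messages = [
    {"role": "user", "content": [{"type": "text", "text": "Hello!"}]},
]
# tokenize=False returns the rendered prompt string rather than input ids
prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
# expected shape: <bos><start_of_turn>user\nHello!<end_of_turn>\n<start_of_turn>model\n
```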
config.json ADDED
@@ -0,0 +1,289 @@
+ {
+   "architectures": [
+     "Gemma3nForConditionalGeneration"
+   ],
+   "audio_config": {
+     "conf_attention_chunk_size": 12,
+     "conf_attention_context_left": 13,
+     "conf_attention_context_right": 0,
+     "conf_attention_logit_cap": 50.0,
+     "conf_conv_kernel_size": 5,
+     "conf_num_attention_heads": 2,
+     "conf_num_hidden_layers": 2,
+     "conf_positional_bias_size": 256,
+     "conf_reduction_factor": 4,
+     "conf_residual_weight": 0.5,
+     "gradient_clipping": 10000000000.0,
+     "hidden_size": 64,
+     "input_feat_size": 128,
+     "model_type": "gemma3n_audio",
+     "rms_norm_eps": 1e-06,
+     "sscp_conv_channel_size": [
+       128,
+       32
+     ],
+     "sscp_conv_eps": 0.001,
+     "sscp_conv_group_norm_eps": 0.001,
+     "sscp_conv_kernel_size": [
+       [
+         3,
+         3
+       ],
+       [
+         3,
+         3
+       ]
+     ],
+     "sscp_conv_stride_size": [
+       [
+         2,
+         2
+       ],
+       [
+         2,
+         2
+       ]
+     ],
+     "torch_dtype": "bfloat16",
+     "vocab_offset": 262272,
+     "vocab_size": 128
+   },
+   "audio_soft_tokens_per_image": 188,
+   "audio_token_id": 262273,
+   "boa_token_id": 256000,
+   "boi_token_id": 255999,
+   "eoa_token_id": 262272,
+   "eoi_token_id": 262144,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "image_token_id": 262145,
+   "initializer_range": 0.02,
+   "model_type": "gemma3n",
+   "text_config": {
+     "activation_sparsity_pattern": [
+       0.95,
+       0.95,
+       0.0,
+       0.0
+     ],
+     "altup_active_idx": 0,
+     "altup_coef_clip": 120.0,
+     "altup_correct_scale": true,
+     "altup_lr_multiplier": 1.0,
+     "altup_num_inputs": 4,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "final_logit_softcapping": 30.0,
+     "head_dim": 32,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 32,
+     "hidden_size_per_layer_input": 2,
+     "initializer_range": 0.02,
+     "intermediate_size": [
+       64,
+       64,
+       64,
+       64
+     ],
+     "laurel_rank": 8,
+     "layer_types": [
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "full_attention"
+     ],
+     "max_position_embeddings": 32768,
+     "model_type": "gemma3n_text",
+     "num_attention_heads": 1,
+     "num_hidden_layers": 4,
+     "num_key_value_heads": 1,
+     "num_kv_shared_layers": 2,
+     "query_pre_attn_scalar": 256,
+     "rms_norm_eps": 1e-06,
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": null,
+     "rope_theta": 1000000.0,
+     "sliding_window": 512,
+     "torch_dtype": "bfloat16",
+     "use_cache": true,
+     "vocab_size": 262400,
+     "vocab_size_per_layer_input": 262144
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.54.0.dev0",
+   "vision_config": {
+     "architecture": "mobilenetv5_300m_enc",
+     "do_pooling": true,
+     "hidden_size": 2048,
+     "initializer_range": 0.02,
+     "label_names": [
+       "LABEL_0",
+       "LABEL_1"
+     ],
+     "model_args": {
+       "block_args": [
+         [
+           {
+             "act_layer": null,
+             "block_type": "er",
+             "exp_kernel_size": 3,
+             "exp_ratio": 4.0,
+             "force_in_chs": 0,
+             "noskip": false,
+             "out_chs": 32,
+             "pw_kernel_size": 1,
+             "se_ratio": 0.0,
+             "stride": 2
+           },
+           {
+             "act_layer": null,
+             "block_type": "er",
+             "exp_kernel_size": 3,
+             "exp_ratio": 4.0,
+             "force_in_chs": 0,
+             "noskip": false,
+             "out_chs": 32,
+             "pw_kernel_size": 1,
+             "se_ratio": 0.0,
+             "stride": 1
+           }
+         ],
+         [
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 5,
+             "dw_kernel_size_start": 3,
+             "exp_ratio": 6.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 2
+           },
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 0,
+             "dw_kernel_size_start": 5,
+             "exp_ratio": 4.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 1
+           },
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 0,
+             "dw_kernel_size_start": 3,
+             "exp_ratio": 4.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 1
+           }
+         ],
+         [
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 5,
+             "dw_kernel_size_start": 5,
+             "exp_ratio": 6.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 2
+           },
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 0,
+             "dw_kernel_size_start": 0,
+             "exp_ratio": 1.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 1
+           },
+           {
+             "act_layer": null,
+             "block_type": "mqa",
+             "dw_kernel_size": 3,
+             "key_dim": 64,
+             "kv_stride": 2,
+             "noskip": false,
+             "num_heads": 2,
+             "out_chs": 32,
+             "stride": 1,
+             "value_dim": 64
+           },
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 0,
+             "dw_kernel_size_start": 0,
+             "exp_ratio": 2.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 1
+           }
+         ],
+         [
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 5,
+             "dw_kernel_size_start": 5,
+             "exp_ratio": 6.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 2
+           },
+           {
+             "act_layer": null,
+             "block_type": "mqa",
+             "dw_kernel_size": 3,
+             "key_dim": 64,
+             "kv_stride": 1,
+             "noskip": false,
+             "num_heads": 2,
+             "out_chs": 32,
+             "stride": 1,
+             "value_dim": 64
+           },
+           {
+             "act_layer": null,
+             "block_type": "uir",
+             "dw_kernel_size_end": 0,
+             "dw_kernel_size_mid": 0,
+             "dw_kernel_size_start": 0,
+             "exp_ratio": 2.0,
+             "noskip": false,
+             "out_chs": 32,
+             "se_ratio": 0.0,
+             "stride": 1
+           }
+         ]
+       ]
+     },
+     "model_type": "gemma3n_vision",
+     "num_classes": 2,
+     "rms_norm_eps": 1e-06,
+     "torch_dtype": "bfloat16",
+     "vocab_offset": 262144,
+     "vocab_size": 128
+   },
+   "vision_soft_tokens_per_image": 256
+ }
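A quick sanity check of the shrunken dimensions above; a minimal sketch, assuming the hub id from the README:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("tiny-random/gemma-3n")
# the tiny text tower: 4 layers, hidden size 32, 1 attention head
assert config.text_config.num_hidden_layers == 4
assert config.text_config.hidden_size == 32
assert config.audio_config.hidden_size == 64
print(config.vision_config.hidden_size)  # 2048, hard-coded in timm
```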
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "pad_token_id": 0,
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.54.0.dev0",
+   "trust_remote_code": true
+ }
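The sampling defaults above (top-k 64, top-p 0.95) carry over from the source model. A minimal sketch for inspecting them, assuming the hub id from the README:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("tiny-random/gemma-3n")
print(gen_config.do_sample, gen_config.top_k, gen_config.top_p)  # True 64 0.95
```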
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07cf4ccabeb400baa5b96ad6df316bb7ff86578f9450bf5b1e2a226e15c448f5
+ size 23576116
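This is a Git LFS pointer; the ~23.6 MB checkpoint itself lives in LFS. A minimal sketch for listing its tensors after download, assuming the hub id from the README:

```python
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download("tiny-random/gemma-3n", filename="model.safetensors")
with safe_open(path, framework="pt") as f:
    keys = list(f.keys())  # parameter names stored in the checkpoint
print(len(keys), keys[:3])
```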
preprocessor_config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": false,
+   "device": null,
+   "disable_grouping": null,
+   "dither": 0.0,
+   "do_center_crop": null,
+   "do_convert_rgb": null,
+   "do_normalize": false,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_size": 128,
+   "fft_length": 1024,
+   "fft_overdrive": true,
+   "frame_length": 512,
+   "hop_length": 160,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "SiglipImageProcessorFast",
+   "image_seq_length": 256,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "input_scale_factor": 1.0,
+   "max_frequency": 7600.0,
+   "mel_floor": 1e-05,
+   "min_frequency": 125.0,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "per_bin_mean": null,
+   "per_bin_stddev": null,
+   "preemphasis": 0.97,
+   "preemphasis_htk_flavor": true,
+   "processor_class": "Gemma3nProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "return_attention_mask": false,
+   "return_tensors": null,
+   "sampling_rate": 16000,
+   "size": {
+     "height": 768,
+     "width": 768
+   }
+ }
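The audio front end frames 16 kHz audio into 128-bin mel features with a 512-sample window and a 160-sample hop (32 ms windows every 10 ms). A rough back-of-envelope sketch (not the library's exact padding logic) of how this relates to the 188-token audio budget, assuming the encoder's two stride-2 convolutions plus conf_reduction_factor=4 amount to a combined 16x time reduction:

```python
# rough framing arithmetic from the values above
sampling_rate = 16000  # Hz
frame_length = 512     # samples per window (32 ms)
hop_length = 160       # samples per hop (10 ms)

def mel_frames(num_samples: int) -> int:
    # one frame per hop once the first full window fits
    return 1 + (num_samples - frame_length) // hop_length

frames = mel_frames(30 * sampling_rate)
print(frames)        # 2997 mel frames for 30 s of audio
print(frames // 16)  # ~187, in line with audio_seq_length = 188
```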
processor_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "audio_seq_length": 188,
+   "image_seq_length": 256,
+   "processor_class": "Gemma3nProcessor"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "audio_token": "<audio_soft_token>",
+   "boa_token": "<start_of_audio>",
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoa_token": "<end_of_audio>",
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
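A minimal sketch for checking that these special tokens are wired up in the tokenizer, assuming the hub id from the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tiny-random/gemma-3n")
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)  # <bos> <eos> <pad>
print(tokenizer.convert_tokens_to_ids("<image_soft_token>"))  # 262145, matching image_token_id in config.json
```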
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c35ee648c07754b44cd9e371c75d4caa05c4504910b7ad29b1847ee9d8ba5d
+ size 33442553
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea5f0cc48abfbfc04d14562270a32e02149a3e7035f368cc5a462786f4a59961
+ size 4696020
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff