bavihao committed
Commit 45074eb · verified · 1 parent: ec0d4b5

Upload WhisperForConditionalGeneration

Files changed (2)
  1. config.json +16 -1
  2. model.safetensors +3 -0
config.json CHANGED
@@ -42,8 +42,23 @@
   "num_hidden_layers": 32,
   "num_mel_bins": 80,
   "pad_token_id": 50257,
+  "quantization_config": {
+    "_load_in_4bit": false,
+    "_load_in_8bit": true,
+    "bnb_4bit_compute_dtype": "float32",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "fp4",
+    "bnb_4bit_use_double_quant": false,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": false,
+    "load_in_8bit": true,
+    "quant_method": "bitsandbytes"
+  },
   "scale_embedding": false,
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": "4.42.0.dev0",
   "use_cache": false,
   "use_weighted_layer_sum": false
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2bc1e70acae0986367c6cd4292c8044090c319f904b234d758755172c8e30e1
+size 1622472984
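
For reference, the quantization_config added to config.json above is what transformers writes out when a checkpoint is loaded through bitsandbytes 8-bit quantization and then saved. A minimal sketch of how such a checkpoint could be produced and reloaded; the base model id (openai/whisper-large-v2) and the output directory are assumptions, not taken from this commit:

import torch
from transformers import BitsAndBytesConfig, WhisperForConditionalGeneration

# Mirrors the relevant fields of the committed quantization_config.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,        # "load_in_8bit": true
    llm_int8_threshold=6.0,   # "llm_int8_threshold": 6.0
)

# Assumed base checkpoint; the commit itself does not name one.
model = WhisperForConditionalGeneration.from_pretrained(
    "openai/whisper-large-v2",
    quantization_config=bnb_config,
    torch_dtype=torch.float16,   # "torch_dtype": "float16"
    device_map="auto",
)

# save_pretrained() writes config.json (including quantization_config)
# and the int8 weights as model.safetensors.
model.save_pretrained("whisper-8bit")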
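
The committed model.safetensors is only a Git LFS pointer; the ~1.6 GB weight file is fetched separately by LFS. A small sketch (assuming the resolved file has already been downloaded to ./model.safetensors) of checking it against the oid and size recorded in the pointer:

import hashlib
import os

EXPECTED_OID = "a2bc1e70acae0986367c6cd4292c8044090c319f904b234d758755172c8e30e1"
EXPECTED_SIZE = 1622472984
PATH = "model.safetensors"  # assumed local path to the resolved LFS object

# Cheap check first: the byte size recorded in the pointer.
assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"

# Then the sha256 digest, streamed in 1 MiB chunks.
digest = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")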