dawon62 committed
Commit e40f680 (verified) · Parent: 26b1bb2

Upload MarianForCausalLM

Files changed (3)
  1. config.json +42 -42
  2. generation_config.json +16 -16
  3. model.safetensors +2 -2
config.json CHANGED
@@ -1,42 +1,42 @@
-{
-  "_name_or_path": "Helsinki-NLP/opus-mt-tc-big-en-ko",
-  "activation_dropout": 0.0,
-  "activation_function": "relu",
-  "architectures": [
-    "MarianForCausalLM"
-  ],
-  "attention_dropout": 0.0,
-  "bos_token_id": 0,
-  "classifier_dropout": 0.0,
-  "d_model": 1024,
-  "decoder_attention_heads": 16,
-  "decoder_ffn_dim": 4096,
-  "decoder_layerdrop": 0.0,
-  "decoder_layers": 6,
-  "decoder_start_token_id": 32000,
-  "decoder_vocab_size": 32001,
-  "dropout": 0.1,
-  "encoder_attention_heads": 16,
-  "encoder_ffn_dim": 4096,
-  "encoder_layerdrop": 0.0,
-  "encoder_layers": 6,
-  "eos_token_id": 2,
-  "forced_eos_token_id": null,
-  "init_std": 0.02,
-  "is_decoder": true,
-  "is_encoder_decoder": false,
-  "max_length": null,
-  "max_position_embeddings": 1024,
-  "model_type": "marian",
-  "normalize_embedding": false,
-  "num_beams": null,
-  "num_hidden_layers": 6,
-  "pad_token_id": 32000,
-  "scale_embedding": true,
-  "share_encoder_decoder_embeddings": true,
-  "static_position_embeddings": true,
-  "torch_dtype": "float16",
-  "transformers_version": "4.45.1",
-  "use_cache": true,
-  "vocab_size": 32001
-}
+{
+  "_name_or_path": "Helsinki-NLP/opus-mt-tc-big-en-ko",
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "MarianForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 4096,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 32000,
+  "decoder_vocab_size": 32001,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 4096,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 2,
+  "forced_eos_token_id": null,
+  "init_std": 0.02,
+  "is_decoder": true,
+  "is_encoder_decoder": false,
+  "max_length": null,
+  "max_position_embeddings": 1024,
+  "model_type": "marian",
+  "normalize_embedding": false,
+  "num_beams": null,
+  "num_hidden_layers": 6,
+  "pad_token_id": 32000,
+  "scale_embedding": true,
+  "share_encoder_decoder_embeddings": true,
+  "static_position_embeddings": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.45.1",
+  "use_cache": true,
+  "vocab_size": 32001
+}
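
The only substantive change in config.json is "torch_dtype" flipping from "float16" to "float32"; every other field is identical. A minimal loading sketch under stated assumptions: the repository id is not shown on this page, so "dawon62/model-repo" below is a placeholder, and the tokenizer is assumed to come from the base checkpoint named in "_name_or_path", since this commit uploads no tokenizer files.

# Minimal loading sketch. "dawon62/model-repo" is a hypothetical placeholder
# for the actual repository id, which this commit page does not show.
from transformers import AutoTokenizer, MarianForCausalLM

repo = "dawon62/model-repo"  # hypothetical id
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-tc-big-en-ko")
# torch_dtype="auto" makes from_pretrained honor the dtype in config.json.
model = MarianForCausalLM.from_pretrained(repo, torch_dtype="auto")
print(model.dtype)  # expected: torch.float32 after this commit (was float16)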
generation_config.json CHANGED
@@ -1,16 +1,16 @@
-{
-  "bad_words_ids": [
-    [
-      32000
-    ]
-  ],
-  "bos_token_id": 0,
-  "decoder_start_token_id": 32000,
-  "eos_token_id": 2,
-  "forced_eos_token_id": 2,
-  "max_length": 512,
-  "num_beams": 4,
-  "pad_token_id": 32000,
-  "renormalize_logits": true,
-  "transformers_version": "4.45.1"
-}
+{
+  "bad_words_ids": [
+    [
+      32000
+    ]
+  ],
+  "bos_token_id": 0,
+  "decoder_start_token_id": 32000,
+  "eos_token_id": 2,
+  "forced_eos_token_id": 2,
+  "max_length": 512,
+  "num_beams": 4,
+  "pad_token_id": 32000,
+  "renormalize_logits": true,
+  "transformers_version": "4.45.1"
+}
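
These defaults travel with the model: generate() reads generation_config.json automatically, so beam search (num_beams=4), the 512-token limit, logit renormalization, and the ban on re-emitting the pad token (bad_words_ids=[[32000]]) all apply without extra arguments. A sketch, reusing the hypothetical repository id from above:

# Sketch of the generation defaults in action; repo id is a placeholder.
from transformers import AutoTokenizer, MarianForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-tc-big-en-ko")
model = MarianForCausalLM.from_pretrained("dawon62/model-repo")  # hypothetical

inputs = tokenizer("Hello world", return_tensors="pt")
# generate() picks up generation_config.json: num_beams=4, max_length=512,
# renormalize_logits=True, and token 32000 (pad) is never generated.
out = model.generate(**inputs)
print(tokenizer.decode(out[0], skip_special_tokens=True))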
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88bf4c1f4b4dccb35fa2acfe6a4a39e3d3b5f49efb5ab3247fe734e2f641e38b
-size 275512400
+oid sha256:11978e10c79c4273df5897b527d5397e561ff2f296ee7664fed98cd628ee8552
+size 550999184
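
The weight file roughly doubles in size, which is exactly what the fp16-to-fp32 change in config.json predicts: the same ~137.7M parameters now take 4 bytes each instead of 2. A quick sanity check on the two sizes from this diff (the small residual is the safetensors header/metadata):

# Back-of-the-envelope check that the new size matches converting the same
# weights from fp16 (2 bytes/param) to fp32 (4 bytes/param).
old_size = 275_512_400   # bytes before this commit (fp16)
new_size = 550_999_184   # bytes after this commit (fp32)

print(old_size / 2)         # ~137.8M params implied by the old file
print(new_size / 4)         # ~137.7M params implied by the new file
print(new_size / old_size)  # ~2.0, consistent with fp16 -> fp32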