colesimmons committed
Commit 59dc109 · verified · 1 Parent(s): 765a321

Upload model

Files changed (3)
  1. config.json +47 -49
  2. generation_config.json +1 -3
  3. model.safetensors +2 -2
config.json CHANGED
@@ -1,33 +1,24 @@
 {
-  "_name_or_path": "./models/cont_alt/best_model",
-  "activation_dropout": 0.1,
+  "_name_or_path": "/models/final_all_unfrozen/best_model",
   "architectures": [
-    "VisionEncoderDecoderModel"
+    "EncoderDecoderModel"
   ],
-  "attention_dropout": 0.1,
-  "bos_token_id": 0,
   "decoder": {
-    "_name_or_path": "",
-    "activation_dropout": 0.0,
-    "activation_function": "gelu",
+    "_name_or_path": "FacebookAI/xlm-roberta-base",
     "add_cross_attention": true,
-    "architectures": null,
-    "attention_dropout": 0.0,
+    "architectures": [
+      "XLMRobertaForMaskedLM"
+    ],
+    "attention_probs_dropout_prob": 0.1,
     "bad_words_ids": null,
     "begin_suppress_tokens": null,
     "bos_token_id": 0,
     "chunk_size_feed_forward": 0,
-    "classifier_dropout": 0.0,
-    "cross_attention_hidden_size": 768,
-    "d_model": 1024,
-    "decoder_attention_heads": 16,
-    "decoder_ffn_dim": 4096,
-    "decoder_layerdrop": 0.0,
-    "decoder_layers": 12,
-    "decoder_start_token_id": 2,
+    "classifier_dropout": null,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
     "diversity_penalty": 0.0,
     "do_sample": false,
-    "dropout": 0.1,
     "early_stopping": false,
     "encoder_no_repeat_ngram_size": 0,
     "eos_token_id": 2,
@@ -35,31 +26,39 @@
     "finetuning_task": null,
     "forced_bos_token_id": null,
     "forced_eos_token_id": null,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.1,
+    "hidden_size": 768,
     "id2label": {
       "0": "LABEL_0",
       "1": "LABEL_1"
     },
-    "init_std": 0.02,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
     "is_decoder": true,
     "is_encoder_decoder": false,
     "label2id": {
       "LABEL_0": 0,
       "LABEL_1": 1
     },
-    "layernorm_embedding": true,
+    "layer_norm_eps": 1e-05,
     "length_penalty": 1.0,
     "max_length": 20,
-    "max_position_embeddings": 512,
+    "max_position_embeddings": 514,
     "min_length": 0,
-    "model_type": "trocr",
+    "model_type": "xlm-roberta",
     "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
     "num_beam_groups": 1,
     "num_beams": 1,
+    "num_hidden_layers": 12,
     "num_return_sequences": 1,
     "output_attentions": false,
     "output_hidden_states": false,
+    "output_past": true,
     "output_scores": false,
     "pad_token_id": 1,
+    "position_embedding_type": "absolute",
     "prefix": null,
     "problem_type": null,
     "pruned_heads": {},
@@ -67,7 +66,6 @@
     "repetition_penalty": 1.0,
     "return_dict": true,
     "return_dict_in_generate": false,
-    "scale_embedding": false,
     "sep_token_id": null,
     "suppress_tokens": null,
     "task_specific_params": null,
@@ -80,43 +78,43 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
+    "type_vocab_size": 1,
     "typical_p": 1.0,
     "use_bfloat16": false,
-    "use_cache": false,
-    "use_learned_position_embeddings": true,
-    "vocab_size": 632
+    "use_cache": true,
+    "vocab_size": 1024
   },
   "decoder_start_token_id": 0,
-  "dropout": 0.1,
   "encoder": {
-    "_name_or_path": "",
+    "_name_or_path": "colesimmons/xlm-roberta-sumerian-glyphs",
     "add_cross_attention": false,
-    "architectures": null,
-    "attention_probs_dropout_prob": 0.0,
+    "architectures": [
+      "XLMRobertaForMaskedLM"
+    ],
+    "attention_probs_dropout_prob": 0.15,
     "bad_words_ids": null,
     "begin_suppress_tokens": null,
-    "bos_token_id": null,
+    "bos_token_id": 0,
     "chunk_size_feed_forward": 0,
+    "classifier_dropout": null,
     "cross_attention_hidden_size": null,
     "decoder_start_token_id": null,
     "diversity_penalty": 0.0,
     "do_sample": false,
     "early_stopping": false,
     "encoder_no_repeat_ngram_size": 0,
-    "encoder_stride": 16,
-    "eos_token_id": null,
+    "eos_token_id": 2,
     "exponential_decay_length_penalty": null,
     "finetuning_task": null,
     "forced_bos_token_id": null,
     "forced_eos_token_id": null,
     "hidden_act": "gelu",
-    "hidden_dropout_prob": 0.0,
+    "hidden_dropout_prob": 0.15,
     "hidden_size": 768,
     "id2label": {
       "0": "LABEL_0",
       "1": "LABEL_1"
     },
-    "image_size": 384,
     "initializer_range": 0.02,
     "intermediate_size": 3072,
     "is_decoder": false,
@@ -125,27 +123,27 @@
       "LABEL_0": 0,
       "LABEL_1": 1
     },
-    "layer_norm_eps": 1e-12,
+    "layer_norm_eps": 1e-05,
     "length_penalty": 1.0,
     "max_length": 20,
+    "max_position_embeddings": 514,
     "min_length": 0,
-    "model_type": "vit",
+    "model_type": "xlm-roberta",
     "no_repeat_ngram_size": 0,
     "num_attention_heads": 12,
     "num_beam_groups": 1,
     "num_beams": 1,
-    "num_channels": 3,
     "num_hidden_layers": 12,
     "num_return_sequences": 1,
     "output_attentions": false,
     "output_hidden_states": false,
+    "output_past": true,
     "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 16,
+    "pad_token_id": 1,
+    "position_embedding_type": "absolute",
     "prefix": null,
     "problem_type": null,
     "pruned_heads": {},
-    "qkv_bias": false,
     "remove_invalid_values": false,
     "repetition_penalty": 1.0,
     "return_dict": true,
@@ -160,17 +158,17 @@
     "tokenizer_class": null,
     "top_k": 50,
     "top_p": 1.0,
-    "torch_dtype": null,
+    "torch_dtype": "float32",
     "torchscript": false,
+    "type_vocab_size": 1,
     "typical_p": 1.0,
-    "use_bfloat16": false
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 640
   },
-  "eos_token_id": 2,
   "is_encoder_decoder": true,
-  "model_type": "vision-encoder-decoder",
+  "model_type": "encoder-decoder",
   "pad_token_id": 1,
-  "processor_class": "TrOCRProcessor",
-  "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.41.0"
+  "transformers_version": "4.41.1"
 }
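Net effect of this config change: the checkpoint switches from a TrOCR-style VisionEncoderDecoderModel (ViT encoder, TrOCR decoder) to a text-to-text EncoderDecoderModel pairing colesimmons/xlm-roberta-sumerian-glyphs as the encoder with FacebookAI/xlm-roberta-base as the decoder, both with standard XLM-RoBERTa geometry (12 layers, hidden size 768) but trimmed vocabularies (640 and 1024). Below is a minimal sketch of how a config shaped like the new one is typically produced with transformers; the pairing call and output path are assumptions, not code from this commit, and the sketch does not reproduce the trimmed vocabularies.

from transformers import EncoderDecoderModel

# Pair the two checkpoints named in the diff. from_encoder_decoder_pretrained
# flips the decoder to is_decoder=True and add_cross_attention=True, matching
# the "decoder" block in the new config.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "colesimmons/xlm-roberta-sumerian-glyphs",  # encoder in the new config
    "FacebookAI/xlm-roberta-base",              # decoder in the new config
)

# Mirror the top-level token ids recorded in the diff.
model.config.decoder_start_token_id = 0
model.config.pad_token_id = 1

# save_pretrained writes a config.json shaped like the new side of this diff.
model.save_pretrained("best_model")  # directory name is illustrative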
generation_config.json CHANGED
@@ -1,9 +1,7 @@
 {
   "_from_model_config": true,
   "bos_token_id": 0,
-  "decoder_start_token_id": 2,
   "eos_token_id": 2,
   "pad_token_id": 1,
-  "transformers_version": "4.41.0",
-  "use_cache": false
+  "transformers_version": "4.41.1"
 }
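The new generation_config.json keeps only the special-token ids: decoder_start_token_id and use_cache were dropped, so generate() falls back to the model config above, where decoder_start_token_id is 0 and the decoder sets use_cache to true. A sketch of writing an equivalent file with transformers (the output directory is illustrative):

from transformers import GenerationConfig

# Only the token ids survive in the committed file; everything else is
# inherited from config.json at generation time.
gen_config = GenerationConfig(
    bos_token_id=0,
    eos_token_id=2,
    pad_token_id=1,
)
gen_config.save_pretrained("best_model")  # writes best_model/generation_config.json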
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:217a48143c297db57cb4ede3af1c00c8a47acec12ca1f1b60ead4a997f3116d8
-size 1132449976
+oid sha256:c413f33d46356b77d25ebc48902faa99379ecad2e7ffd186a24a3957d99d7dff
+size 806992280
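model.safetensors is stored as a Git LFS pointer, so the diff only swaps the oid (the sha256 of the blob) and its size; the checkpoint shrinks from 1,132,449,976 to 806,992,280 bytes, consistent with the smaller architecture above. A sketch (an assumed verification workflow, not part of the commit) for checking a downloaded copy against the new pointer:

import hashlib

EXPECTED_OID = "c413f33d46356b77d25ebc48902faa99379ecad2e7ffd186a24a3957d99d7dff"
EXPECTED_SIZE = 806_992_280

# Stream the file in 1 MiB chunks so large checkpoints need not fit in RAM.
digest = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")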