ngocson2002 committed
Commit · ca39bed
Parent(s): 80778d7
Upload BEiT3ForVietnameseVisualQuestionAnswering
Browse files
- config.json +52 -0
- configuration_vivqa.py +1 -0
- model.safetensors +3 -0
- modeling_vivqa.py +1 -0
config.json
ADDED
@@ -0,0 +1,52 @@
+{
+  "activation_dropout": 0.0,
+  "activation_fn": "gelu",
+  "architectures": [
+    "BEiT3ForVietnameseVisualQuestionAnswering"
+  ],
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_vivqa.ViVQAConfig",
+    "AutoModel": "modeling_vivqa.BEiT3ForVietnameseVisualQuestionAnswering"
+  },
+  "bert_init": false,
+  "checkpoint_activations": false,
+  "ddp_rank": 0,
+  "deepnorm": false,
+  "drop_path_rate": 0.0,
+  "dropout": 0.0,
+  "encoder_attention_heads": 4,
+  "encoder_embed_dim": 768,
+  "encoder_ffn_embed_dim": 3072,
+  "encoder_layers": 4,
+  "encoder_normalize_before": true,
+  "fsdp": false,
+  "img_size": 224,
+  "in_chans": 3,
+  "layernorm_embedding": false,
+  "layernorm_eps": 1e-05,
+  "max_rel_pos": 0,
+  "max_source_positions": 1024,
+  "model_type": "vivqa",
+  "moe_eval_capacity_token_fraction": 0.25,
+  "moe_expert_count": 0,
+  "moe_freq": 0,
+  "moe_gating_use_fp32": true,
+  "moe_normalize_gate_prob_before_dropping": false,
+  "moe_second_expert_policy": "random",
+  "moe_top1_expert": false,
+  "multiway": true,
+  "no_output_layer": true,
+  "no_scale_embedding": true,
+  "normalize_output": true,
+  "patch_size": 16,
+  "rel_pos_buckets": 0,
+  "share_encoder_input_output_embed": false,
+  "subln": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "use_xmoe": false,
+  "vocab_size": -1,
+  "xpos_rel_pos": false,
+  "xpos_scale_base": 512
+}
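The "auto_map" block above registers the custom classes with transformers' Auto* machinery, so the checkpoint loads directly from the Hub when remote code is trusted. A minimal loading sketch; the repo id "ngocson2002/vivqa" is an assumption, since the commit view does not show the repository name:

# Minimal loading sketch. The repo id below is hypothetical.
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("ngocson2002/vivqa", trust_remote_code=True)
model = AutoModel.from_pretrained("ngocson2002/vivqa", trust_remote_code=True)
# trust_remote_code=True lets the Auto* classes resolve the auto_map entries
# to configuration_vivqa.ViVQAConfig and
# modeling_vivqa.BEiT3ForVietnameseVisualQuestionAnswering.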
configuration_vivqa.py
CHANGED
@@ -1,4 +1,5 @@
 from transformers import PretrainedConfig
+from torchscale.architecture.config import EncoderConfig
 
 class ViVQAConfig(PretrainedConfig):
     model_type = "vivqa"
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06d19ca8565c6ca7b5717df05fd5490768bf2d73e27f4b662fbd9ae120ca71e1
+size 4911305908
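model.safetensors is tracked with Git LFS, so the commit stores only this three-line pointer: the spec version, the SHA-256 of the payload, and its size (roughly 4.9 GB). A sketch of fetching the resolved weights with huggingface_hub, again assuming a hypothetical repo id:

from huggingface_hub import hf_hub_download

# Downloads the resolved LFS payload, not the pointer file itself.
weights_path = hf_hub_download(repo_id="ngocson2002/vivqa",
                               filename="model.safetensors")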
modeling_vivqa.py
CHANGED
@@ -13,6 +13,7 @@ from dataclasses import dataclass
 from typing import Optional
 from efficientnet_pytorch import EfficientNet
 from lavis.common.registry import registry
+from .configuration_vivqa import ViVQAConfig
 
 class BartPhoExtractor(nn.Module):
     def __init__(self):