zpn committed
Commit 3a75eff · 1 Parent(s): e119b48

Delete configuration_nomic_bert.py

Files changed (1)
  1. configuration_nomic_bert.py +0 -51
configuration_nomic_bert.py DELETED
@@ -1,51 +0,0 @@
-from transformers import GPT2Config
-
-
-class NomicBertConfig(GPT2Config):
-    model_type = "nomic_bert"
-
-    def __init__(self,
-                 prenorm=False,
-                 parallel_block=False,
-                 parallel_block_tied_norm=False,
-                 rotary_emb_fraction=0.0,
-                 fused_dropout_add_ln=False,
-                 fused_bias_fc=False,
-                 use_flash_attn=False,
-                 use_xentropy=False,
-                 qkv_proj_bias=True,
-                 rotary_emb_base=1000,
-                 rotary_emb_scale_base=None,
-                 rotary_emb_interleaved=False,
-                 mlp_fc1_bias=True,
-                 mlp_fc2_bias=True,
-                 use_rms_norm=False,
-                 causal=False,
-                 type_vocab_size=2,
-                 dense_seq_output=True,
-                 pad_vocab_size_multiple=1,
-                 tie_word_embeddings=True,
-                 **kwargs,
-                 ):
-        self.prenorm = prenorm
-        self.parallel_block = parallel_block
-        self.parallel_block_tied_norm = parallel_block_tied_norm
-        self.rotary_emb_fraction = rotary_emb_fraction
-        self.tie_word_embeddings = tie_word_embeddings
-        self.fused_dropout_add_ln = fused_dropout_add_ln
-        self.fused_bias_fc = fused_bias_fc
-        self.use_flash_attn = use_flash_attn
-        self.use_xentropy = use_xentropy
-        self.qkv_proj_bias = qkv_proj_bias
-        self.rotary_emb_base = rotary_emb_base
-        self.rotary_emb_scale_base = rotary_emb_scale_base
-        self.rotary_emb_interleaved = rotary_emb_interleaved
-        self.mlp_fc1_bias = mlp_fc1_bias
-        self.mlp_fc2_bias = mlp_fc2_bias
-        self.use_rms_norm = use_rms_norm
-        self.causal = causal
-        self.type_vocab_size = type_vocab_size
-        self.dense_seq_output = dense_seq_output
-        self.pad_vocab_size_multiple = pad_vocab_size_multiple
-
-        super().__init__(**kwargs)
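For reference, a custom configuration class like the one removed in this commit is typically wired up either through trust_remote_code on the Hub side or by registering it with AutoConfig locally. The sketch below is illustrative only and assumes a local copy of configuration_nomic_bert.py (with the NomicBertConfig class shown in the diff) is still importable; the registration call and override values are not part of this commit.

# Hypothetical usage sketch; assumes the deleted file is still available locally.
from transformers import AutoConfig
from configuration_nomic_bert import NomicBertConfig

# Map model_type "nomic_bert" to the custom class so AutoConfig can resolve it.
AutoConfig.register("nomic_bert", NomicBertConfig)

# Instantiate directly, overriding a couple of the defaults defined above.
config = NomicBertConfig(
    use_flash_attn=True,      # enable flash attention kernels if installed
    rotary_emb_fraction=1.0,  # apply rotary embeddings to the full head dimension
)
print(config.model_type, config.rotary_emb_base)  # -> nomic_bert 1000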