T-Almeida committed on
Commit
519e43a
·
verified ·
1 Parent(s): b5d06a1

Upload model

Browse files
config.json CHANGED
@@ -64,5 +64,6 @@
64
  "transformers_version": "4.37.2",
65
  "type_vocab_size": 2,
66
  "use_cache": true,
 
67
  "vocab_size": 28895
68
  }
 
64
  "transformers_version": "4.37.2",
65
  "type_vocab_size": 2,
66
  "use_cache": true,
67
+ "version": "0.1.1",
68
  "vocab_size": 28895
69
  }
configuration_bionexttager.py CHANGED
@@ -13,8 +13,10 @@ class BioNextTaggerConfig(PretrainedConfig):
13
  percentage_tags = 0.2,
14
  p_augmentation = 0.5,
15
  crf_reduction = "mean",
 
16
  **kwargs,
17
  ):
 
18
  self.augmentation = augmentation
19
  self.context_size = context_size
20
  self.percentage_tags = percentage_tags
 
13
  percentage_tags = 0.2,
14
  p_augmentation = 0.5,
15
  crf_reduction = "mean",
16
+ version="0.1.1",
17
  **kwargs,
18
  ):
19
+ self.version = version
20
  self.augmentation = augmentation
21
  self.context_size = context_size
22
  self.percentage_tags = percentage_tags
modeling_bionexttagger.py CHANGED
@@ -1,7 +1,7 @@
1
 
2
  import os
3
  from typing import Optional, Union
4
- from transformers import AutoModel, PreTrainedModel, AutoConfig
5
  from transformers.modeling_outputs import TokenClassifierOutput
6
  from torch import nn
7
  from torch.nn import CrossEntropyLoss
@@ -22,7 +22,11 @@ class BioNextTaggerModel(PreTrainedModel):
22
  def __init__(self, config):
23
  super().__init__(config)
24
  self.num_labels = config.num_labels
25
- self.bert = AutoModel.from_pretrained(config._name_or_path, config=config.get_backbonemodel_config(), add_pooling_layer=False)
 
 
 
 
26
  # self.vocab_size = config.vocab_size
27
  classifier_dropout = (config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob)
28
  self.dropout = nn.Dropout(config.hidden_dropout_prob)
 
1
 
2
  import os
3
  from typing import Optional, Union
4
+ from transformers import AutoModel, PreTrainedModel, AutoConfig, BertModel
5
  from transformers.modeling_outputs import TokenClassifierOutput
6
  from torch import nn
7
  from torch.nn import CrossEntropyLoss
 
22
  def __init__(self, config):
23
  super().__init__(config)
24
  self.num_labels = config.num_labels
25
+ #print("LOAD BERT?")
26
+ self.bert = BertModel(config.get_backbonemodel_config(), add_pooling_layer=False)
27
+ #AutoModel.from_pretrained(config._name_or_path,
28
+ # config=config.get_backbonemodel_config(),
29
+ # add_pooling_layer=False)
30
  # self.vocab_size = config.vocab_size
31
  classifier_dropout = (config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob)
32
  self.dropout = nn.Dropout(config.hidden_dropout_prob)