Reyad-Ahmmed committed on
Commit
0420873
·
verified ·
1 Parent(s): 9397789

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -95,7 +95,7 @@ if (should_train_model=='1'): #train model
95
 
96
  #tokenizer = BertTokenizer.from_pretrained(repo_name, subfolder="bert_embeddings_finetune
97
 
98
- tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
99
 
100
  # I made sure to add all the ones in the training and eval data to this list
101
  # since we are training using data that only contains the left tag - we don't need right tags added to this list
@@ -106,7 +106,7 @@ if (should_train_model=='1'): #train model
106
  # Model
107
  #model = BertForSequenceClassification.from_pretrained(repo_name, subfolder="bert_embeddings_finetune", output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cpu')
108
 
109
- model = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cpu')
110
 
111
  # Reset tokenizer size to include the new size after adding the tags to the tokenizer's tokens
112
  model.resize_token_embeddings(len(tokenizer))
 
95
 
96
  #tokenizer = BertTokenizer.from_pretrained(repo_name, subfolder="bert_embeddings_finetune
97
 
98
+ tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
99
 
100
  # I made sure to add all the ones in the training and eval data to this list
101
  # since we are training using data that only contains the left tag - we don't need right tags added to this list
 
106
  # Model
107
  #model = BertForSequenceClassification.from_pretrained(repo_name, subfolder="bert_embeddings_finetune", output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cpu')
108
 
109
+ model = BertForSequenceClassification.from_pretrained("roberta-base", output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cpu')
110
 
111
  # Reset tokenizer size to include the new size after adding the tags to the tokenizer's tokens
112
  model.resize_token_embeddings(len(tokenizer))