Reyad-Ahmmed committed
Commit 5ed117c · verified · Parent: eec1383

Update app.py

Files changed (1): app.py (+4, -4)
app.py CHANGED
@@ -111,8 +111,8 @@ if (should_train_model=='1'): #train model
 
 
     # Model
-    model = BertForSequenceClassification.from_pretrained(repo_name, subfolder="bert_embeddings_finetune", output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cuda')
-    # model = BertForSequenceClassification.from_pretrained('./mitra_ai_fleet_bert', output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cuda')
+    model = BertForSequenceClassification.from_pretrained(repo_name, subfolder="bert_embeddings_finetune", output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cpu')
+    # model = BertForSequenceClassification.from_pretrained('./mitra_ai_fleet_bert', output_attentions=True, num_labels=len(label_mapping), output_hidden_states=True).to('cpu')
 
 
     # Reset tokenizer size to include the new size after adding the tags to the tokenizer's tokens
@@ -353,7 +353,7 @@ else:
     model_save_path = "./saved_fleet_model"
     tokenizer_save_path = "./saved_fleet_tokenizer"
     # RobertaTokenizer.from_pretrained(model_save_path)
-    model = AutoModelForSequenceClassification.from_pretrained(model_save_path).to('cuda')
+    model = AutoModelForSequenceClassification.from_pretrained(model_save_path).to('cpu')
     tokenizer = AutoTokenizer.from_pretrained(tokenizer_save_path)
 
     #Define the label mappings (this must match the mapping used during training)
@@ -370,7 +370,7 @@ def classify_user_input():
         break
 
     # Tokenize and predict
-    input_encoding = tokenizer(user_input, padding=True, truncation=True, return_tensors="pt").to('cuda')
+    input_encoding = tokenizer(user_input, padding=True, truncation=True, return_tensors="pt").to('cpu')
 
     with torch.no_grad():
         #attention_mask = input_encoding['attention_mask'].clone()
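
The commit replaces .to('cuda') with .to('cpu') at every point where the model or its tokenized inputs are placed on a device. A minimal sketch of a device-agnostic alternative, not part of this commit: select the device once at runtime so the same code runs on GPU or CPU hosts. The user_input value here is a hypothetical placeholder; the paths and call signatures follow the diff above.

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Fall back to CPU automatically when no GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

model_save_path = "./saved_fleet_model"          # paths from the diff above
tokenizer_save_path = "./saved_fleet_tokenizer"

model = AutoModelForSequenceClassification.from_pretrained(model_save_path).to(device)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_save_path)

user_input = "example fleet query"               # hypothetical input text
# Tokenize and move the encoding to the same device as the model.
input_encoding = tokenizer(user_input, padding=True, truncation=True,
                           return_tensors="pt").to(device)

with torch.no_grad():
    logits = model(**input_encoding).logits
predicted_id = logits.argmax(dim=-1).item()      # index into the label mapping

Keeping the model and the input encoding on the same device avoids the runtime error PyTorch raises when tensors on different devices are combined, which is the failure mode a hardcoded 'cuda' causes on CPU-only hardware.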