AlGe committed · Commit 8cb52d1 · verified · 1 Parent(s): 2a3a970

Update app.py

Files changed (1): app.py (+9 -8)
app.py CHANGED
@@ -3,24 +3,25 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForTokenClassification
 import os
 
+auth_token = os.environ['HF_TOKEN']
+
 # Load the tokenizer and models for the first pipeline
-tokenizer_ext = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_token")
-model_ext = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_token")
+tokenizer_ext = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_token", token=auth_token)
+model_ext = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_token", token=auth_token)
 tokenizer_ext.model_max_length = 512
 pipe_ext = gr.pipeline("ner", model=model_ext, tokenizer=tokenizer_ext)
 
 # Load the tokenizer and models for the second pipeline
-tokenizer_ais = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_AIS-token")
-model_ais = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_AIS-token")
+tokenizer_ais = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_AIS-token", token=auth_token)
+model_ais = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_AIS-token", token=auth_token)
 tokenizer_ais.model_max_length = 512
 pipe_ais = gr.pipeline("ner", model=model_ais, tokenizer=tokenizer_ais)
 
 # Load the tokenizer and models for the third pipeline
-auth_token = os.environ['HF_TOKEN']
-model1 = AutoModelForSequenceClassification.from_pretrained("AlGe/deberta-v3-large_Int_segment", num_labels=1, use_auth_token=auth_token)
-tokenizer1 = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_Int_segment", use_auth_token=auth_token)
+model1 = AutoModelForSequenceClassification.from_pretrained("AlGe/deberta-v3-large_Int_segment", num_labels=1, token=auth_token)
+tokenizer1 = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_Int_segment", token=auth_token)
 
-model2 = AutoModelForSequenceClassification.from_pretrained("AlGe/deberta-v3-large_seq_ext", num_labels=1, use_auth_token=auth_token)
+model2 = AutoModelForSequenceClassification.from_pretrained("AlGe/deberta-v3-large_seq_ext", num_labels=1, token=auth_token)
 
 # Define functions to process inputs
 def process_ner(text, pipeline):
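
For context, the commit does two things: it hoists the HF_TOKEN lookup to the top of the file so every from_pretrained call authenticates against the private repos, and it swaps the deprecated use_auth_token argument for token. Below is a minimal, self-contained sketch of that loading pattern, assuming HF_TOKEN is configured (e.g. as a Space secret) and that you have access to the gated repo; it builds the NER pipeline with transformers.pipeline, which is my stand-in here rather than the app's own gr.pipeline call shown in the diff above.

import os

from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

# Read the access token once; HF_TOKEN must be set in the environment
# (for a Space, add it under Settings -> Secrets).
auth_token = os.environ["HF_TOKEN"]

# Pass token= (the replacement for the deprecated use_auth_token=) to every
# call that downloads from a gated or private repository.
repo_id = "AlGe/deberta-v3-large_token"
tokenizer = AutoTokenizer.from_pretrained(repo_id, token=auth_token)
model = AutoModelForTokenClassification.from_pretrained(repo_id, token=auth_token)
tokenizer.model_max_length = 512

# Build a token-classification pipeline from the loaded objects and run it.
ner = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
print(ner("A short test sentence."))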