Update app.py
app.py CHANGED
@@ -5,15 +5,18 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 model_repo_path = 'nxmwxm/correct_answer'
 
-# Load tokenizer
+# Load the tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
-model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)
 
-#
+# Define special tokens
 special_tokens_dict = {'additional_special_tokens': ['<extra_id_99>']}
 num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
+
+# Load the model and resize token embeddings
+model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)
 model.resize_token_embeddings(len(tokenizer))
 
+
 # Load the pipeline with your model and tokenizer
 qa_pipeline = pipeline(
     'question-answering',
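In both versions the key invariant is that resize_token_embeddings runs after the extra special token is added, so the embedding matrix matches the enlarged tokenizer vocabulary; the new layout just groups the model load with the resize and documents each step. Below is a minimal sketch of that order, with a sanity check added here for illustration (the assert is not part of app.py):

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_repo_path = 'nxmwxm/correct_answer'

# Tokenizer first, then register the extra sentinel token
tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
num_added_toks = tokenizer.add_special_tokens(
    {'additional_special_tokens': ['<extra_id_99>']}
)

# Load the model afterwards and grow its embeddings to the new vocab size
model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)
model.resize_token_embeddings(len(tokenizer))

# Sanity check (illustrative): the input embedding matrix now covers every tokenizer id
assert model.get_input_embeddings().weight.shape[0] == len(tokenizer)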