Spaces:
Sleeping
Sleeping
Lautaro Cardarelli
committed on
Commit
·
e396b2c
1
Parent(s):
c2e73e4
add fix for qa
Browse files
app.py
CHANGED
@@ -81,8 +81,6 @@ qg_tokenizer = T5Tokenizer.from_pretrained('valhalla/t5-base-e2e-qg')
|
|
81 |
|
82 |
|
83 |
def generate_questions(text):
|
84 |
-
print('qg')
|
85 |
-
print(text)
|
86 |
qg_final_model = E2EQGPipeline(qg_model, qg_tokenizer)
|
87 |
questions = qg_final_model.generate_questions(text)
|
88 |
translator = Translator()
|
@@ -111,14 +109,12 @@ qa_model = T5ForConditionalGeneration.from_pretrained(ckpt).to(device)
|
|
111 |
def generate_question_response(question, context):
|
112 |
input_text = 'question: %s context: %s' % (question, context)
|
113 |
print(input_text)
|
114 |
-
features =
|
115 |
output = qa_model.generate(
|
116 |
input_ids=features['input_ids'].to(device),
|
117 |
attention_mask=features['attention_mask'].to(device),
|
118 |
-
temperature=1.0
|
119 |
)
|
120 |
-
print('output')
|
121 |
-
print(output)
|
122 |
return qa_tokenizer.decode(output[0], skip_special_tokens=True)
|
123 |
|
124 |
|
|
|
81 |
|
82 |
|
83 |
def generate_questions(text):
|
|
|
|
|
84 |
qg_final_model = E2EQGPipeline(qg_model, qg_tokenizer)
|
85 |
questions = qg_final_model.generate_questions(text)
|
86 |
translator = Translator()
|
|
|
def generate_question_response(question, context):
    """Answer *question* given *context* using the seq2seq QA model.

    Builds the T5-style ``question: ... context: ...`` prompt, tokenizes it
    (padded/truncated to 512 tokens), generates an answer with ``qa_model``,
    and returns the decoded answer string.

    Parameters
    ----------
    question : str
        The natural-language question to answer.
    context : str
        The passage the answer should be drawn from.

    Returns
    -------
    str
        The generated answer, with special tokens stripped.
    """
    input_text = 'question: %s context: %s' % (question, context)
    print(input_text)
    # Tokenize as a single-item batch; max_length padding keeps the input
    # shape fixed at 512 tokens. This line was the fix added in this commit
    # (previously `features` was never assigned).
    features = qa_tokenizer([input_text], padding='max_length', truncation=True, max_length=512, return_tensors='pt')
    output = qa_model.generate(
        input_ids=features['input_ids'].to(device),
        attention_mask=features['attention_mask'].to(device),
        # NOTE(review): temperature has no effect without do_sample=True
        # (greedy decoding is the default) — confirm sampling is intended.
        temperature=1.0
    )
    # Decode only the first (and only) sequence in the batch.
    return qa_tokenizer.decode(output[0], skip_special_tokens=True)
|
119 |
|
120 |
|