Update app.py
app.py CHANGED
@@ -39,20 +39,6 @@ beto_sqac_model_spanish = 'salsarra/Beto-Spanish-Cased-SQAC'
 beto_sqac_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(beto_sqac_model_spanish)
 beto_sqac_tokenizer_spanish = AutoTokenizer.from_pretrained(beto_sqac_model_spanish)
 
-# Define question_answering_v1 for ConfliBERT English
-def question_answering_v1(context, question):
-    try:
-        inputs = qa_tokenizer_v1(question, context, return_tensors='tf', truncation=True)
-        outputs = qa_model_v1(inputs)
-        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
-        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
-        answer = qa_tokenizer_v1.convert_tokens_to_string(
-            qa_tokenizer_v1.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
-        )
-        return f"<span style='color: green; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
 # Define error handling to separate input size errors from other issues
 def handle_error_message(e, default_limit=512):
     error_message = str(e)
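Note: this hunk shows only the first and last lines of handle_error_message; its body (old lines 59-69) falls outside the diff context and is not part of this commit. Purely for illustration, a hypothetical reconstruction, assuming the helper just inspects the exception text for a sequence-length failure against the 512-token default, could look like the sketch below; everything beyond the two lines visible in the diff is an assumption, not the app's actual code.

# Hypothetical sketch only -- the real body is elided by the diff context.
def handle_error_message(e, default_limit=512):
    error_message = str(e)
    # Assumption: token-limit failures surface as "sequence length" / "indices"
    # wording in the raised exception, so match on that to give a clearer message.
    if "sequence length" in error_message or "indices" in error_message.lower():
        return (f"<span style='color: red; font-weight: bold;'>Error: the input exceeds "
                f"the model's {default_limit}-token limit. Please shorten the context.</span>")
    return f"<span style='color: red; font-weight: bold;'>Error: {error_message}</span>"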
@@ -70,6 +56,33 @@ def handle_error_message(e, default_limit=512):
 
     return f"<span style='color: red; font-weight: bold;'>Error: {error_message}</span>"
 
+# Define question_answering_v1 for ConfliBERT English
+def question_answering_v1(context, question):
+    try:
+        inputs = qa_tokenizer_v1(question, context, return_tensors='tf', truncation=True)
+        outputs = qa_model_v1(inputs)
+        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
+        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
+        answer = qa_tokenizer_v1.convert_tokens_to_string(
+            qa_tokenizer_v1.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
+        )
+        return f"<span style='color: green; font-weight: bold;'>{answer}</span>"
+    except Exception as e:
+        return handle_error_message(e)
+
+# Define bert_question_answering_v1 for BERT English
+def bert_question_answering_v1(context, question):
+    try:
+        inputs = bert_qa_tokenizer_v1(question, context, return_tensors='tf', truncation=True)
+        outputs = bert_qa_model_v1(inputs)
+        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
+        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
+        answer = bert_qa_tokenizer_v1.convert_tokens_to_string(
+            bert_qa_tokenizer_v1.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
+        )
+        return f"<span style='font-weight: bold;'>{answer}</span>"
+    except Exception as e:
+        return handle_error_message(e)
 # Main comparison function with language selection
 def compare_question_answering(language, context, question):
     if language == "English":
|