# ConfliBERT-QA / app.py
import torch
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering, AutoModelForCausalLM
import gradio as gr
import re
# Check if GPU is available and use it if possible
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
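# Note: `device` is used only by the PyTorch generative models below; the
# TensorFlow QA models handle their own device placement.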
# Load Spanish models and tokenizers
confli_model_spanish = 'salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA'
confli_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(confli_model_spanish)
confli_tokenizer_spanish = AutoTokenizer.from_pretrained(confli_model_spanish)
beto_model_spanish = 'salsarra/Beto-Spanish-Cased-NewsQA'
beto_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(beto_model_spanish)
beto_tokenizer_spanish = AutoTokenizer.from_pretrained(beto_model_spanish)
confli_sqac_model_spanish = 'salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC'
confli_sqac_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(confli_sqac_model_spanish)
confli_sqac_tokenizer_spanish = AutoTokenizer.from_pretrained(confli_sqac_model_spanish)
beto_sqac_model_spanish = 'salsarra/Beto-Spanish-Cased-SQAC'
beto_sqac_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(beto_sqac_model_spanish)
beto_sqac_tokenizer_spanish = AutoTokenizer.from_pretrained(beto_sqac_model_spanish)
# Load Spanish GPT-2 model and tokenizer
gpt2_spanish_model_name = 'datificate/gpt2-small-spanish'
gpt2_spanish_tokenizer = AutoTokenizer.from_pretrained(gpt2_spanish_model_name)
gpt2_spanish_model = AutoModelForCausalLM.from_pretrained(gpt2_spanish_model_name).to(device)
# Load BLOOM-1.7B model and tokenizer for Spanish
bloom_model_name = 'bigscience/bloom-1b7'
bloom_tokenizer = AutoTokenizer.from_pretrained(bloom_model_name)
bloom_model = AutoModelForCausalLM.from_pretrained(bloom_model_name).to(device)

# Warm up the models with a dummy pass so the first real request is fast
def preload_models():
    dummy_context = "Este es un contexto de prueba."
    dummy_question = "¿Cuál es el propósito de este contexto?"
    # Run each QA model once on a dummy input to initialize it
    inputs = confli_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
    _ = confli_model_spanish_qa(inputs)
    inputs = beto_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
    _ = beto_model_spanish_qa(inputs)
    inputs = confli_sqac_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
    _ = confli_sqac_model_spanish_qa(inputs)
    inputs = beto_sqac_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
    _ = beto_sqac_model_spanish_qa(inputs)
preload_models() # Initialize models

# Error handling: turn a tensor-size-mismatch exception into a readable HTML message
def handle_error_message(e, default_limit=512):
    error_message = str(e)
    pattern = re.compile(r"The size of tensor a \((\d+)\) must match the size of tensor b \((\d+)\)")
    match = pattern.search(error_message)
    if match:
        number_1, number_2 = match.groups()
        return f"<span style='color: red; font-weight: bold;'>Error: input size {number_1} exceeds the model limit of {number_2}</span>"
    return f"<span style='color: red; font-weight: bold;'>Error: input size exceeds the model limit of {default_limit}</span>"

# Extractive Spanish QA functions (TensorFlow models)
def question_answering_spanish(context, question):
    try:
        inputs = confli_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
        outputs = confli_model_spanish_qa(inputs)
        # Most likely start/end token positions of the answer span
        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
        answer = confli_tokenizer_spanish.convert_tokens_to_string(
            confli_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
        )
        return f"<span style='color: green; font-weight: bold;'>{answer}</span>"
    except Exception as e:
        return handle_error_message(e)

def beto_question_answering_spanish(context, question):
    try:
        inputs = beto_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
        outputs = beto_model_spanish_qa(inputs)
        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
        answer = beto_tokenizer_spanish.convert_tokens_to_string(
            beto_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
        )
        return f"<span style='color: blue; font-weight: bold;'>{answer}</span>"
    except Exception as e:
        return handle_error_message(e)

def confli_sqac_question_answering_spanish(context, question):
    try:
        inputs = confli_sqac_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
        outputs = confli_sqac_model_spanish_qa(inputs)
        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
        answer = confli_sqac_tokenizer_spanish.convert_tokens_to_string(
            confli_sqac_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
        )
        return f"<span style='color: teal; font-weight: bold;'>{answer}</span>"
    except Exception as e:
        return handle_error_message(e)

def beto_sqac_question_answering_spanish(context, question):
    try:
        inputs = beto_sqac_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
        outputs = beto_sqac_model_spanish_qa(inputs)
        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
        answer = beto_sqac_tokenizer_spanish.convert_tokens_to_string(
            beto_sqac_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end])
        )
        return f"<span style='color: brown; font-weight: bold;'>{answer}</span>"
    except Exception as e:
        return handle_error_message(e)
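
# Generative QA functions: prompt each causal LM and keep only the text
# generated after the "Respuesta:" marker.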
def gpt2_spanish_question_answering(context, question):
    try:
        prompt = f"Contexto:\n{context}\n\nPregunta:\n{question}\n\nRespuesta:"
        inputs = gpt2_spanish_tokenizer(prompt, return_tensors='pt').to(device)
        outputs = gpt2_spanish_model.generate(
            inputs['input_ids'],
            max_length=inputs['input_ids'].shape[1] + 50,
            num_return_sequences=1,
            pad_token_id=gpt2_spanish_tokenizer.eos_token_id,
            do_sample=True,
            top_k=40,
            temperature=0.8
        )
        answer = gpt2_spanish_tokenizer.decode(outputs[0], skip_special_tokens=True)
        answer = answer.split("Respuesta:")[-1].strip()
        return f"<span style='color: orange; font-weight: bold;'>{answer}</span>"
    except Exception as e:
        return handle_error_message(e)

def bloom_question_answering(context, question):
    try:
        prompt = f"Contexto:\n{context}\n\nPregunta:\n{question}\n\nRespuesta:"
        inputs = bloom_tokenizer(prompt, return_tensors='pt').to(device)
        outputs = bloom_model.generate(
            inputs['input_ids'],
            max_length=inputs['input_ids'].shape[1] + 50,
            num_return_sequences=1,
            pad_token_id=bloom_tokenizer.eos_token_id,
            do_sample=True,
            top_k=40,
            temperature=0.8
        )
        answer = bloom_tokenizer.decode(outputs[0], skip_special_tokens=True)
        answer = answer.split("Respuesta:")[-1].strip()
        return f"<span style='color: purple; font-weight: bold;'>{answer}</span>"
    except Exception as e:
        return handle_error_message(e)

# Main function for Spanish QA: run all models and render their answers side by side
def compare_question_answering_spanish(context, question):
    confli_answer_spanish = question_answering_spanish(context, question)
    beto_answer_spanish = beto_question_answering_spanish(context, question)
    confli_sqac_answer_spanish = confli_sqac_question_answering_spanish(context, question)
    beto_sqac_answer_spanish = beto_sqac_question_answering_spanish(context, question)
    gpt2_answer_spanish = gpt2_spanish_question_answering(context, question)
    bloom_answer = bloom_question_answering(context, question)
    return f"""
    <div>
      <h2 style='color: #2e8b57; font-weight: bold;'>Respuestas:</h2>
    </div><br>
    <div>
      <strong>ConfliBERT-Spanish-Beto-Cased-NewsQA:</strong><br>{confli_answer_spanish}
    </div><br>
    <div>
      <strong>Beto-Spanish-Cased-NewsQA:</strong><br>{beto_answer_spanish}
    </div><br>
    <div>
      <strong>ConfliBERT-Spanish-Beto-Cased-SQAC:</strong><br>{confli_sqac_answer_spanish}
    </div><br>
    <div>
      <strong>Beto-Spanish-Cased-SQAC:</strong><br>{beto_sqac_answer_spanish}
    </div><br>
    <div>
      <strong>GPT-2-Small-Spanish:</strong><br>{gpt2_answer_spanish}
    </div><br>
    <div>
      <strong>BLOOM-1.7B:</strong><br>{bloom_answer}
    </div><br>
    <div>
      <strong>Información de los modelos:</strong><br>
      ConfliBERT-Spanish-Beto-Cased-NewsQA: <a href='https://huggingface.co/salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA' target='_blank'>salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA</a><br>
      Beto-Spanish-Cased-NewsQA: <a href='https://huggingface.co/salsarra/Beto-Spanish-Cased-NewsQA' target='_blank'>salsarra/Beto-Spanish-Cased-NewsQA</a><br>
      ConfliBERT-Spanish-Beto-Cased-SQAC: <a href='https://huggingface.co/salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC' target='_blank'>salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC</a><br>
      Beto-Spanish-Cased-SQAC: <a href='https://huggingface.co/salsarra/Beto-Spanish-Cased-SQAC' target='_blank'>salsarra/Beto-Spanish-Cased-SQAC</a><br>
      GPT-2-Small-Spanish: <a href='https://huggingface.co/datificate/gpt2-small-spanish' target='_blank'>datificate GPT-2 Small Spanish</a><br>
      BLOOM-1.7B: <a href='https://huggingface.co/bigscience/bloom-1b7' target='_blank'>bigscience BLOOM-1.7B</a><br>
    </div>
    """
# CSS for Gradio interface
css_styles = """
body {
    background-color: #f0f8ff;
    font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
}
h1 a {
    color: #2e8b57;
    text-align: center;
    font-size: 2em;
    text-decoration: none;
}
h1 a:hover {
    color: #ff8c00;
}
h2 {
    color: #ff8c00;
    text-align: center;
    font-size: 1.5em;
}
.description-light {
    color: black;
    display: block;
    font-size: 1em;
    text-align: center;
}
.description-dark {
    color: white;
    display: none;
    font-size: 1em;
    text-align: center;
}
@media (prefers-color-scheme: dark) {
    .description-light {
        display: none;
    }
    .description-dark {
        display: block;
    }
}
.footer {
    text-align: center;
    margin-top: 10px;
    font-size: 0.9em;
    color: #666;
    width: 100%;
}
.footer a {
    color: #2e8b57;
    font-weight: bold;
    text-decoration: none;
}
.footer a:hover {
    text-decoration: underline;
}
"""

# Define the Gradio interface with footer directly in the layout
demo = gr.Interface(
    fn=compare_question_answering_spanish,
    inputs=[
        gr.Textbox(lines=5, placeholder="Ingrese el contexto aquí...", label="Contexto"),
        gr.Textbox(lines=2, placeholder="Ingrese su pregunta aquí...", label="Pregunta")
    ],
    outputs=gr.HTML(label="Salida"),
    title="<a href='https://eventdata.utdallas.edu/conflibert/' target='_blank'>ConfliBERT-Spanish-QA</a>",
    description="""
    <span class="description-light">Compare respuestas entre los modelos ConfliBERT, BETO, ConfliBERT SQAC, Beto SQAC, GPT-2 Small Spanish y BLOOM-1.7B para preguntas en español.</span>
    <span class="description-dark">Compare respuestas entre los modelos ConfliBERT, BETO, ConfliBERT SQAC, Beto SQAC, GPT-2 Small Spanish y BLOOM-1.7B para preguntas en español.</span>
    """,
    css=css_styles,
    allow_flagging="never",
    # Footer HTML with centered, green links
    article="""
    <div class='footer'>
        <a href='https://eventdata.utdallas.edu/' style='color: #2e8b57; font-weight: bold;'>UTD Event Data</a> |
        <a href='https://www.utdallas.edu/' style='color: #2e8b57; font-weight: bold;'>University of Texas at Dallas</a>
    </div>
    <div class='footer'>
        Developed By: <a href='https://www.linkedin.com/in/sultan-alsarra-phd-56977a63/' target='_blank' style='color: #2e8b57; font-weight: bold;'>Sultan Alsarra</a>
    </div>
    """
)
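
# Quick local sanity check: the comparison function can also be called directly,
# bypassing the UI (illustrative context/question, not from the original repo):
#   html = compare_question_answering_spanish(
#       "La ONU fue fundada en 1945 tras la Segunda Guerra Mundial.",
#       "¿Cuándo fue fundada la ONU?"
#   )
#   print(html)
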
# Launch the Gradio demo
demo.launch(share=True)