import gradio as gr
import tensorflow as tf
from tensorflow.keras.preprocessing.text import tokenizer_from_json
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json
import numpy as np
import os

# Helper to resolve bundled files, with basic error handling
def load_file(filename):
    try:
        filepath = os.path.join(os.path.dirname(__file__), filename)
        print(f"Attempting to load {filepath}")
        if not os.path.exists(filepath):
            print(f"File not found: {filepath}")
            return None
        return filepath
    except Exception as e:
        print(f"Error loading {filename}: {str(e)}")
        return None

# Load the model
model_path = load_file('polarisatie_model.h5')
if model_path:
    try:
        model = tf.keras.models.load_model(model_path)
        print("Model loaded successfully")
    except Exception as e:
        print(f"Error loading model: {str(e)}")
        model = None
else:
    model = None

# Load the tokenizer
tokenizer_path = load_file('tokenizer.json')
if tokenizer_path:
    try:
        with open(tokenizer_path, 'r') as f:
            tokenizer_json = json.load(f)
        # tokenizer_from_json expects a JSON string, so re-serialize the parsed dict
        tokenizer = tokenizer_from_json(json.dumps(tokenizer_json))
        print("Tokenizer loaded successfully")
    except Exception as e:
        print(f"Error loading tokenizer: {str(e)}")
        tokenizer = None
else:
    tokenizer = None

# Load max_length
max_length_path = load_file('max_length.txt')
if max_length_path:
    try:
        with open(max_length_path, 'r') as f:
            max_length = int(f.read().strip())
        print(f"Max length loaded: {max_length}")
    except Exception as e:
        print(f"Error loading max_length: {str(e)}")
        max_length = 100  # default value
else:
    max_length = 100  # default value

def preprocess_text(text):
    # Tokenize the input and pad/truncate it to the length the model expects
    if tokenizer is None:
        return None
    sequence = tokenizer.texts_to_sequences([text])
    padded = pad_sequences(sequence, maxlen=max_length)
    return padded
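
# Note: preprocess_text returns a 2-D array of shape (1, max_length), since
# pad_sequences pads the single tokenized sentence into a batch of one. That
# batch shape is what model.predict receives below.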

def predict_polarization(text):
    if model is None or tokenizer is None:
        return {"Error": "Model or tokenizer not loaded correctly"}
    preprocessed_text = preprocess_text(text)
    if preprocessed_text is None:
        return {"Error": "Failed to preprocess text"}
    prediction = model.predict(preprocessed_text)
    # Index 1 is taken as the probability of the "polarizing" class
    probability = float(prediction[0][1])
    is_polarizing = bool(probability > 0.5)
    response = "Polariserend" if is_polarizing else "Niet polariserend"
    return {
        "Is Polarizing": is_polarizing,
        "Probability": f"{probability:.2%}",
        "Response": response
    }
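
# Optional sanity check (a sketch, not part of the original app): run the
# predictor on one of the example sentences so loading problems surface in the
# logs before the UI starts. Only runs when both artifacts loaded successfully.
if model is not None and tokenizer is not None:
    print("Self-test:", predict_polarization("Dit is een neutrale zin."))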

# Create the Gradio interface
iface = gr.Interface(
    fn=predict_polarization,
    inputs=gr.Textbox(lines=2, placeholder="Voer hier je Nederlandse tekst in..."),
    outputs=gr.JSON(),
    title="Dutch Text Polarization Detector",
    description="Voer een Nederlandse tekst in om te bepalen of deze polariserend is.",
    examples=[
        ["Dit is een neutrale zin."],
        ["Alle politici zijn leugenaars en dieven!"],
        ["Het weer is vandaag erg mooi."],
        ["Die groep mensen is de oorzaak van al onze problemen."]
    ]
)

# Launch the app
iface.launch()