import gradio as gr
from huggingface_hub import InferenceClient
import os
import time

# Hugging Face access token, read from the environment (None if HF_API_TOKEN is unset).
hf_token = os.getenv("HF_API_TOKEN")

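# Wraps one InferenceClient per configured model, tracks which model is active,
# and keeps the conversation history used to build each prompt.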
class ModelHandler:
    def __init__(self, model_names, token):
        self.clients = {
            model_key: InferenceClient(model_name, token=token)
            for model_key, model_name in model_names.items()
        }
        self.current_model = list(model_names.keys())[0]
        self.conversation_history = []

    def switch_model(self, model_key):
        if model_key in self.clients:
            self.current_model = model_key
        else:
            raise ValueError(f"Modelo {model_key} no está disponible.")

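    # Appends the user message to the history, sends the accumulated history as context
    # and returns the model's reply (or an error message if the call fails).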
    def generate_response(self, input_text):
        self.conversation_history.append({"role": "user", "content": input_text})
        prompt = f"Historial de conversación: {self.conversation_history}\nPregunta: {input_text}"

        try:
            messages = [{"role": "user", "content": prompt}]
            client = self.clients[self.current_model]
            response = client.chat_completion(messages=messages, max_tokens=500)
            if hasattr(response, 'choices') and response.choices:
                generated_text = response.choices[0].message.content
                self.conversation_history.append({"role": "assistant", "content": generated_text})
                return generated_text
            else:
                return str(response)
        except Exception as e:
            return f"Error al realizar la inferencia: {e}"

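    # Classifies the text with an English emotion model and returns the scores
    # with the labels translated to Spanish.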
    def analyze_emotion(self, input_text):
        emotion_translation = {
            "joy": "Alegría",
            "anger": "Enojo",
            "fear": "Miedo",
            "sadness": "Tristeza",
            "love": "Amor",
            "surprise": "Sorpresa"
        }

        try:
            client = InferenceClient("bhadresh-savani/distilbert-base-uncased-emotion", token=hf_token)
            response = client.text_classification(input_text)
            emotions = [
                f"{emotion_translation[label['label']]}: {label['score']:.2%}"
                for label in response
            ]
            return "\n".join(emotions)
        except Exception as e:
            return f"Error al analizar la emoción: {e}"

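# Dropdown labels mapped to Hugging Face Hub model ids.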
model_names = {
    "CHATBOT": "microsoft/Phi-3-mini-4k-instruct"
}

model_handler = ModelHandler(model_names, hf_token)

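# Yields simulated progress messages, then calls the text-to-image endpoint and
# yields the final image.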
def generate_image_with_progress(prompt):
    try:
        client = InferenceClient("stabilityai/stable-diffusion-2-1-base", token=hf_token, timeout=None)

        # Simulated progress loop; the actual generation happens in text_to_image below.
        for progress in range(0, 101, 20):
            time.sleep(0.5)
            yield f"Generando imagen... {progress}% completado", None

        image = client.text_to_image(prompt, width=512, height=512)
        yield "Imagen generada con éxito", image
    except Exception as e:
        yield f"Error al generar la imagen: {e}", None

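# Gradio UI: a dropdown to choose the action/model, a text input, a status box and an image output.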
with gr.Blocks(title="Multi-Model LLM Chatbot with Image Generation and Emotion Analysis") as demo:
    gr.Markdown(
        """
        ## Chatbot Multi-Modelo LLM con Generación de Imágenes y Análisis de Emociones
        Este chatbot permite elegir entre múltiples modelos de lenguaje para responder preguntas, recordar la conversación o analizar emociones en los textos.
        """
    )
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(model_names.keys()) + ["Generación de Imágenes", "Análisis de Emociones"],
            value="CHATBOT",
            label="Seleccionar Acción/Modelo",
            interactive=True
        )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                lines=5,
                placeholder="Escribe tu consulta o descripción para la imagen...",
                label="Entrada"
            )
        with gr.Column():
            output_display = gr.Textbox(
                lines=5,
                label="Estado",
                interactive=False
            )
            output_image = gr.Image(
                label="Imagen Generada",
                interactive=False
            )
    submit_button = gr.Button("Enviar")

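    # Routes the request according to the dropdown selection: image generation,
    # emotion analysis, or chat with the currently selected model.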
    def process_input(selected_action, user_input):
        try:
            if selected_action == "Generación de Imágenes":
                progress_generator = generate_image_with_progress(user_input)
                last_status = None
                last_image = None
                # Drain the generator and keep only the final status/image pair.
                for status, image in progress_generator:
                    last_status = status
                    last_image = image
                return last_status, last_image
            elif selected_action == "Análisis de Emociones":
                emotion_result = model_handler.analyze_emotion(user_input)
                return f"Emoción detectada:\n{emotion_result}", None
            else:
                model_handler.switch_model(selected_action)
                response = model_handler.generate_response(user_input)
                return response, None
        except Exception as e:
            return f"Error: {e}", None

    submit_button.click(
        fn=process_input,
        inputs=[model_dropdown, input_text],
        outputs=[output_display, output_image]
    )

demo.launch()
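
# Assumed run instructions (not stated in the original script): install `gradio` and
# `huggingface_hub`, set HF_API_TOKEN to a valid Hugging Face token, then run `python app.py`.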