File size: 4,428 Bytes
79adf4f
 
 
1812733
79adf4f
 
 
 
 
 
 
1812733
79adf4f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c4ef265
79adf4f
 
 
 
 
1812733
 
8018226
ca73fe2
8018226
 
ca73fe2
1812733
 
 
 
 
 
c4ef265
1812733
8018226
1812733
8018226
 
 
79adf4f
 
8018226
 
 
79adf4f
 
 
 
8018226
79adf4f
8018226
79adf4f
 
 
 
 
 
8018226
 
79adf4f
 
8018226
79adf4f
1812733
 
8018226
 
 
1812733
79adf4f
 
 
 
a22bb25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79adf4f
 
 
 
 
1812733
79adf4f
 
 
c4ef265
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import gradio as gr
from huggingface_hub import InferenceClient
import os
import time

# Obt茅n el token de manera segura desde el entorno
hf_token = os.getenv("HF_API_TOKEN")

# Clase para manejar m煤ltiples modelos
class ModelHandler:
    def __init__(self, model_names, token):
        self.clients = {model_name: InferenceClient(model_name, token=token) for model_name in model_names}
        self.current_model = model_names[0]

    def switch_model(self, model_name):
        if model_name in self.clients:
            self.current_model = model_name
        else:
            raise ValueError(f"Modelo {model_name} no est谩 disponible.")

    def generate_response(self, input_text):
        prompt = f"Debes de responder a cualquier pregunta:\nPregunta: {input_text}"
        try:
            messages = [{"role": "user", "content": prompt}]
            client = self.clients[self.current_model]
            response = client.chat_completion(messages=messages, max_tokens=500)
            if hasattr(response, 'choices') and response.choices:
                return response.choices[0].message.content
            else:
                return str(response)
        except Exception as e:
            return f"Error al realizar la inferencia: {e}"

# Lista de modelos disponibles
model_names = [
    "microsoft/Phi-3-mini-4k-instruct"
]

# Inicializa el manejador de modelos
model_handler = ModelHandler(model_names, hf_token)

# Define la funci贸n para generaci贸n de im谩genes con progreso
def generate_image_with_progress(prompt):
    """
    Genera una imagen utilizando el modelo de "stabilityai/stable-diffusion-2" y muestra un progreso.
    """
    try:
        client = InferenceClient("stabilityai/stable-diffusion-2", token=hf_token)
        
        # Simular progreso
        for progress in range(0, 101, 20):
            time.sleep(0.5)
            yield f"Generando imagen... {progress}% completado", None

        image = client.text_to_image(prompt, width=512, height=512)
        yield "Imagen generada con 茅xito", image
    except Exception as e:
        yield f"Error al generar la imagen: {e}", None

# Configura la interfaz en Gradio con selecci贸n de modelos y generaci贸n de im谩genes
with gr.Blocks(title="Multi-Model LLM Chatbot with Image Generation") as demo:
    gr.Markdown(
        """
        ## Chatbot Multi-Modelo LLM con Generaci贸n de Im谩genes
        Este chatbot permite elegir entre m煤ltiples modelos de lenguaje para responder preguntas o generar im谩genes 
        a partir de descripciones.
        """
    )
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=model_names + ["Generaci贸n de Im谩genes"],
            value=model_names[0],
            label="Seleccionar Acci贸n/Modelo",
            interactive=True
        )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                lines=5,
                placeholder="Escribe tu consulta o descripci贸n para la imagen...",
                label="Entrada"
            )
        with gr.Column():
            output_display = gr.Textbox(
                lines=5,
                label="Estado",
                interactive=False
            )
            output_image = gr.Image(
                label="Imagen Generada",
                interactive=False
            )
    submit_button = gr.Button("Enviar")
    
    # Define la funci贸n de actualizaci贸n
    def process_input(selected_action, user_input):
        try:
            if selected_action == "Generaci贸n de Im谩genes":
                # Manejamos el generador de progreso
                progress_generator = generate_image_with_progress(user_input)
                last_status = None
                last_image = None
                for status, image in progress_generator:
                    last_status = status
                    last_image = image
                return last_status, last_image
            else:
                model_handler.switch_model(selected_action)
                response = model_handler.generate_response(user_input)
                return response, None
        except Exception as e:
            return f"Error: {e}", None
    
    # Conecta la funci贸n a los componentes
    submit_button.click(
        fn=process_input,
        inputs=[model_dropdown, input_text],
        outputs=[output_display, output_image]
    )

# Lanza la interfaz
demo.launch()