import gradio as gr
from huggingface_hub import InferenceClient
import os

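# Read the Hugging Face API token from the environment and create an
# Inference API client for the hosted Phi-3 chat model.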
hf_token = os.getenv("HF_API_TOKEN")
client = InferenceClient(
    "microsoft/Phi-3-mini-4k-instruct",
    token=hf_token
)


def generate_response(input_text):
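    # Wrap the user's question in a short instruction prompt and query the model.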
prompt = f"Debes de responder a cualquier pregunta:\nPregunta: {input_text}" |
|
try: |
|
|
|
        messages = [{"role": "user", "content": prompt}]
        response = client.chat_completion(messages=messages, max_tokens=500)

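        # Extract the generated text from the completion; fall back to the raw
        # response if it does not expose `choices`.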
        if hasattr(response, 'choices') and response.choices:
            generated_text = response.choices[0].message.content
        else:
            generated_text = str(response)

        return generated_text
    except Exception as e:
        return f"Error while running inference: {e}"


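# Build the Gradio interface: a question box, an answer box, and a submit button.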
with gr.Blocks(title="LLM Chatbot with the Inference API") as demo:
    gr.Markdown(
        """
        ## LLM Chatbot - Questions and Answers
        This chatbot uses a language model to answer questions.
        Enter your question in the text area below and press the submit button to get an answer.
        """
    )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                lines=5,
                placeholder="Type your question here...",
                label="Question"
            )
        with gr.Column():
            output_text = gr.Textbox(
                lines=5,
                label="Answer",
                interactive=False
            )
    submit_button = gr.Button("Submit")
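    # Send the question to the model and show the reply when the button is clicked.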
    submit_button.click(fn=generate_response, inputs=input_text, outputs=output_text)

demo.launch()