import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
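
# BITER is a Spanish-language "AI mentor for entrepreneurs" demo: a system prompt read
# from disk is prepended to each user question, and google/flan-t5-base generates the
# reply behind a small Gradio UI.
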
# Load the model and tokenizer
model_name = "google/flan-t5-base"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
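
# Re-read prompt.txt on every request so the persona can be tweaked without
# restarting the Space (prompt.txt is assumed to sit next to app.py in the repo).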
def get_system_prompt():
    with open("prompt.txt", "r", encoding="utf-8") as f:
        return f.read().strip()
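
# Build the full prompt (system persona + user turn) and let the model complete BITER's reply.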
def generate_response(user_input):
    system_prompt = get_system_prompt()
    full_prompt = f"{system_prompt}\n\nUsuario: {user_input}\nBITER:"
    # Truncate to the model's maximum input length so long prompts don't overflow it
    inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True)
    output = model.generate(**inputs, max_new_tokens=200)
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    return decoded_output.strip()

# Gradio interface to try the model directly on Hugging Face
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Escribe tu pregunta..."),
    outputs=gr.Textbox(),
    title="BITER - Mentor IA para Emprendedores",
    description="Respuestas rápidas, estratégicas y en español. Como un CEO que te asesora al instante.",
)

if __name__ == "__main__":
    demo.launch()
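
# A minimal smoke test outside the Gradio UI, assuming this file is the Space's app.py
# (the sample question is only illustrative):
#
#   python -c 'from app import generate_response; print(generate_response("¿Cómo valido mi idea?"))'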