import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch

# Load the model and tokenizer
model_name = "google/flan-t5-base"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)


def get_system_prompt():
    # Read the system prompt from disk on each request, so edits to
    # prompt.txt take effect without restarting the app.
    with open("prompt.txt", "r", encoding="utf-8") as f:
        return f.read().strip()


def generate_response(user_input):
    # Build the full prompt: system prompt first, then the user's question.
    system_prompt = get_system_prompt()
    full_prompt = f"{system_prompt}\n\nUsuario: {user_input}\nBITER:"
    inputs = tokenizer(full_prompt, return_tensors="pt")
    # Inference only: disable gradient tracking to save memory and time.
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=200)
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    return decoded_output.strip()


# Gradio interface for trying the model directly on Hugging Face
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Escribe tu pregunta..."),
    outputs=gr.Textbox(),
    title="BITER - Mentor IA para Emprendedores",
    description="Respuestas rápidas, estratégicas y en español. Como un CEO que te asesora al instante.",
)

if __name__ == "__main__":
    demo.launch()
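
# --- Optional sketch: sampled decoding ---------------------------------------
# A minimal alternative to the greedy decoding used in generate_response above,
# reusing the same `model` and `tokenizer`. do_sample, temperature, and top_p
# are standard transformers generate() kwargs; the specific values below are
# illustrative assumptions, not settings from the original app. This helper is
# not wired into the Gradio UI; it is only a sketch for experimentation.
def generate_response_sampled(user_input):
    full_prompt = f"{get_system_prompt()}\n\nUsuario: {user_input}\nBITER:"
    # truncation/max_length guard against over-long prompts (assumed values).
    inputs = tokenizer(
        full_prompt, return_tensors="pt", truncation=True, max_length=512
    )
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,    # sample instead of taking the argmax token
            temperature=0.7,   # illustrative value
            top_p=0.9,         # illustrative value
        )
    return tokenizer.decode(output[0], skip_special_tokens=True).strip()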