import gradio as gr
from transformers import pipeline

# Deliberately tiny model and explicit CPU device so the demo runs
# anywhere without a GPU (sshleifer/tiny-gpt2 downloads in seconds).
generator = pipeline("text-generation", model="sshleifer/tiny-gpt2", device="cpu")
def chat_function(message, history):
    """Generate a reply to *message* using the local text-generation pipeline.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[dict]
        Prior turns as ``{"role": ..., "content": ...}`` dicts. Managed and
        re-displayed by ``gr.ChatInterface`` itself, so it must NOT be
        returned back — the previous version returned ``history + [...]``,
        which duplicated the whole conversation on every turn.

    Returns
    -------
    str
        The assistant's reply only; ChatInterface appends it (and the user
        message) to the visible history automatically.
    """
    # max_new_tokens bounds only the generated continuation. The old
    # max_length=50 counted the prompt tokens as well, so generation
    # truncated (or warned/errored) as soon as the input grew past ~50
    # tokens.
    # return_full_text=False strips the echoed prompt, which the
    # text-generation pipeline otherwise prepends to "generated_text".
    result = generator(
        message,
        max_new_tokens=50,
        num_return_sequences=1,
        return_full_text=False,
    )
    return result[0]["generated_text"]
# Wire the generation function into a chat UI. A messages-type Chatbot
# stores each turn as a {"role": ..., "content": ...} dict, matching the
# format chat_function works with.
chat_display = gr.Chatbot(type="messages")

interface = gr.ChatInterface(
    chat_function,
    chatbot=chat_display,
    title="Chatbot IA",
)

interface.launch()