from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

app = FastAPI()

model_id = "gpt2"  # Replace with your desired model

# Load the tokenizer and model once at startup and wrap them in a generation pipeline
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

@app.get("/")
def generate_text(prompt: str):
    # FastAPI reads `prompt` from the query string, e.g. /?prompt=Hello
    result = generator(prompt)
    return {"generated_text": result[0]["generated_text"]}
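# A minimal client sketch for trying the endpoint above. It assumes the service
# is saved as main.py and served locally with `uvicorn main:app --port 8000`;
# the filename, host, port, and example prompt are assumptions, not part of the original.
import requests

if __name__ == "__main__":
    resp = requests.get(
        "http://127.0.0.1:8000/",
        params={"prompt": "Once upon a time"},
    )
    resp.raise_for_status()
    print(resp.json()["generated_text"])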