from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

app = FastAPI()

# Load the model and tokenizer once at import time so every request reuses them.
model_id = "gpt2"  # Replace with your desired model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

@app.get("/")
def generate_text(prompt: str):
    # FastAPI reads `prompt` from the query string, e.g. /?prompt=Hello
    result = generator(prompt)
    return {"generated_text": result[0]["generated_text"]}