from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
app = FastAPI()
class TextGenerationRequest(BaseModel):
    prompt: str
    max_length: int = 100
    temperature: float = 0.7
# Load model and tokenizer (force CPU usage)
model_name = "unsloth/Qwen2.5-7B-bnb-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.float32,  # use float32 for CPU inference
    device_map="cpu",           # force CPU usage
)
@app.get("/", tags=["Home"])
def api_home():
    return {"detail": "Welcome to FastAPI TextGen Tutorial!"}
@app.post("/generate")
async def generate_text(request: TextGenerationRequest):
    try:
        # Tokenize the prompt and keep the tensors on the CPU
        inputs = tokenizer(request.prompt, return_tensors="pt").to("cpu")
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=request.max_length,
            temperature=request.temperature,
            do_sample=True,
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": generated_text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
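Once the server is running, the /generate endpoint accepts a JSON body matching TextGenerationRequest. Below is a minimal client sketch using the requests library; the module name "app", the host, and port 8000 are assumptions for local testing, not part of the original Space.

# Minimal client sketch for the /generate endpoint.
# Assumes the server was started locally, e.g.:
#   uvicorn app:app --host 0.0.0.0 --port 8000
# (the module name "app" and the port are assumptions)
import requests

payload = {
    "prompt": "Write a haiku about FastAPI.",
    "max_length": 80,     # optional, defaults to 100
    "temperature": 0.7,   # optional, defaults to 0.7
}

resp = requests.post("http://localhost:8000/generate", json=payload, timeout=300)
resp.raise_for_status()
print(resp.json()["generated_text"])

Generation on CPU with a 7B model can take a while, so a generous client timeout is used here.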