from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from vllm import AsyncLLMEngine, SamplingParams
from vllm.engine.arg_utils import AsyncEngineArgs
import json
import uuid

app = FastAPI()

# Initialize the AsyncLLMEngine.
# Swap the model name below for another local path or Hugging Face repo if needed.
engine = AsyncLLMEngine.from_engine_args(
    AsyncEngineArgs(
        model='microsoft/Phi-3-mini-4k-instruct',
        max_num_batched_tokens=512,   # Reduced for T4
        max_num_seqs=16,              # Reduced for T4
        gpu_memory_utilization=0.85,  # Slightly increased; adjust if needed
        max_model_len=512,            # Reduced from the model's 4k context to fit T4 memory
        enforce_eager=True,           # Disable CUDA graphs
        dtype='half',                 # Use half precision
    )
)


class GenerationRequest(BaseModel):
    prompt: str
    max_tokens: int = 100
    temperature: float = 0.7


async def generate_stream(prompt: str, max_tokens: int, temperature: float):
    sampling_params = SamplingParams(
        temperature=temperature,
        max_tokens=max_tokens
    )
    request_id = str(uuid.uuid4())
    # engine.generate is an async generator; each RequestOutput carries the
    # cumulative text generated so far for this request.
    async for output in engine.generate(prompt, sampling_params, request_id=request_id):
        yield f"data: {json.dumps({'text': output.outputs[0].text})}\n\n"
    yield "data: [DONE]\n\n"


@app.get("/")
def greet_json():
    return {"Hello": "World!"}


@app.post("/generate-stream")
async def generate_text(request: Request):
    try:
        data = await request.json()
        gen_request = GenerationRequest(**data)
        return StreamingResponse(
            generate_stream(gen_request.prompt, gen_request.max_tokens, gen_request.temperature),
            media_type="text/event-stream"
        )
    except Exception as e:
        # Report errors over the same SSE channel so clients can handle them uniformly.
        return StreamingResponse(
            iter([f"data: {json.dumps({'error': str(e)})}\n\n"]),
            media_type="text/event-stream"
        )
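

# A minimal sketch of running the app directly, assuming uvicorn is installed
# and port 8000 is free (both are assumptions, not part of the original setup).
# Example request once the server is up:
#   curl -N -X POST http://localhost:8000/generate-stream \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Write a haiku about GPUs.", "max_tokens": 64}'
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)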