import os
import asyncio
import logging

import nest_asyncio
import runpod

from engine import vLLMEngine, OpenAIvLLMEngine
from utils import JobInput

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

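# nest_asyncio patches asyncio to allow re-entrant event loops, so code that
# calls run_until_complete can work inside the loop RunPod is already running.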
nest_asyncio.apply()


async def handler(job):
    try:
        logging.info("Received job: %s", job)
        job_input = JobInput(job["input"])
        logging.info("Parsed job input: %s", job_input)
        model_name = job_input.openai_input["model"]
        os.environ["MODEL_NAME"] = model_name
        logging.info("MODEL_NAME set to: %s", model_name)
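
        # A fresh engine is constructed for each job so that it honors the
        # MODEL_NAME exported above.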
logging.info("Initializing vLLMEngine.") |
|
vllm_engine = vLLMEngine() |
|
logging.info("vLLMEngine initialized successfully.") |
|
|
|
logging.info("Initializing OpenAIvLLMEngine.") |
|
OpenAIvLLM = OpenAIvLLMEngine(vllm_engine) |
|
logging.info("OpenAIvLLMEngine initialized successfully.") |
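
        # OpenAI-compatible requests (job_input.openai_route is set) go
        # through the OpenAI wrapper; everything else hits vLLM directly.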
        engine = openai_vllm_engine if job_input.openai_route else vllm_engine
        engine_type = "OpenAIvLLM" if job_input.openai_route else "vLLM"
        logging.info("Using engine: %s", engine_type)

        logging.info("Starting to generate results.")
        results_generator = engine.generate(job_input)
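
        # Stream results back to the client batch by batch as the engine
        # produces them.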
        async for batch in results_generator:
            logging.info("Yielding batch: %s", batch)
            yield batch

        logging.info("Finished processing job: %s", job)
    except Exception:
        # logging.exception records the error together with its traceback,
        # which logging.error with str(e) would drop.
        logging.exception("Error in handler")
        raise


def start_handler():
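    # Make sure the current thread has an event loop before the worker
    # starts; on newer Python versions asyncio.get_event_loop() raises
    # RuntimeError instead of creating one implicitly.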
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
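
    # concurrency_modifier pins this worker's concurrency at 300 jobs
    # regardless of the value RunPod passes in; return_aggregate_stream has
    # the SDK aggregate streamed output for non-streaming requests as well.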
    runpod.serverless.start(
        {
            "handler": handler,
            "concurrency_modifier": lambda x: 300,
            "return_aggregate_stream": True,
        }
    )


if __name__ == "__main__":
    start_handler()
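
# Illustrative job payload (the exact schema is defined by utils.JobInput;
# the "messages" field and model name below are assumptions for the example):
# {
#     "input": {
#         "openai_route": "/v1/chat/completions",
#         "openai_input": {
#             "model": "org/some-model",
#             "messages": [{"role": "user", "content": "Hello"}]
#         }
#     }
# }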