# Runpod model testing — author: diksha, commit: ce587a5
import os
import runpod
from utils import JobInput
from engine import vLLMEngine, OpenAIvLLMEngine
import logging
import asyncio
import nest_asyncio
# Configure root logging once at import time: INFO level with timestamped records.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Patch asyncio to allow nested event loops — presumably needed because the
# runpod serverless runtime drives its own loop around the async handler;
# TODO(review): confirm this is still required with the current runpod SDK.
nest_asyncio.apply()
async def handler(job):
    """Serverless job handler: route a job to the vLLM or OpenAI-compatible engine.

    Parses ``job["input"]`` into a JobInput, selects the engine based on
    ``job_input.openai_route``, and yields result batches from the engine's
    async generator.

    Args:
        job: The raw job dict delivered by the runpod serverless runtime;
            must contain an ``"input"`` key.

    Yields:
        Batches produced by the selected engine's ``generate`` generator.

    Raises:
        Exception: Any error is logged with traceback and re-raised so the
            runtime marks the job as failed.
    """
    try:
        logging.info("Received job: %s", job)
        job_input = JobInput(job["input"])
        logging.info("Parsed job input: %s", job_input)

        # NOTE(review): assumes openai_input always carries a 'model' key;
        # a missing key raises KeyError, caught below. Confirm with callers.
        model_name = job_input.openai_input["model"]
        os.environ["MODEL_NAME"] = model_name
        logging.info("MODEL_NAME set to: %s", model_name)

        # Engines are (re)built on every job — presumably vLLMEngine reads
        # MODEL_NAME from the environment set just above; this is expensive
        # per request. TODO(review): consider caching engines per model.
        logging.info("Initializing vLLMEngine.")
        vllm_engine = vLLMEngine()
        logging.info("vLLMEngine initialized successfully.")
        logging.info("Initializing OpenAIvLLMEngine.")
        openai_engine = OpenAIvLLMEngine(vllm_engine)
        logging.info("OpenAIvLLMEngine initialized successfully.")

        # Route to the OpenAI-compatible wrapper only for OpenAI-style requests.
        engine = openai_engine if job_input.openai_route else vllm_engine
        engine_type = "OpenAIvLLM" if job_input.openai_route else "vLLM"
        logging.info("Using engine: %s", engine_type)

        logging.info("Starting to generate results.")
        results_generator = engine.generate(job_input)
        async for batch in results_generator:
            logging.info("Yielding batch: %s", batch)
            yield batch
        logging.info("Finished processing job: %s", job)
    except Exception as e:
        # Top-level boundary: log with full traceback, then re-raise so the
        # runpod runtime records the job as failed.
        logging.exception("Error in handler: %s", e)
        raise
def start_handler():
    """Ensure an asyncio event loop exists, then start the runpod worker.

    Wraps ``runpod.serverless.start`` so that a usable event loop is bound
    to the current thread before the serverless runtime spins up.
    """
    try:
        # get_event_loop() raises RuntimeError when no loop is set on this
        # thread (and is deprecated for that use on Python 3.10+); fall back
        # to creating and installing one explicitly.
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    runpod.serverless.start(
        {
            "handler": handler,
            # Fixed concurrency cap: always report capacity for 300 jobs,
            # regardless of the current count passed in as x.
            "concurrency_modifier": lambda x: 300,
            # Aggregate streamed batches into the final response as well.
            "return_aggregate_stream": True,
        }
    )
# Script entry point: start the serverless worker only when run directly.
if __name__ == "__main__":
    start_handler()