import asyncio
import logging
import os

import nest_asyncio
import runpod

from engine import vLLMEngine, OpenAIvLLMEngine
from utils import JobInput

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Apply nest_asyncio to allow nested event loops
nest_asyncio.apply()

async def handler(job):
    try:
        logging.info("Received job: %s", job)  # Log the received job
        job_input = JobInput(job["input"])
        
        # Log the input details
        logging.info("Parsed job input: %s", job_input)

        # Assumes every job carries an OpenAI-style body with a "model" field;
        # jobs without one will raise a KeyError here
        model_name = job_input.openai_input["model"]
        os.environ["MODEL_NAME"] = model_name
        logging.info("MODEL_NAME set to: %s", model_name)

        # Initialize engines for this job; vLLMEngine is constructed after
        # MODEL_NAME is set so it can pick the value up from the environment
        logging.info("Initializing vLLMEngine.")
        vllm_engine = vLLMEngine()
        logging.info("vLLMEngine initialized successfully.")

        logging.info("Initializing OpenAIvLLMEngine.")
        OpenAIvLLM = OpenAIvLLMEngine(vllm_engine)
        logging.info("OpenAIvLLMEngine initialized successfully.")

        # Route OpenAI-compatible requests through the OpenAI wrapper and
        # everything else through the raw vLLM engine
        engine = OpenAIvLLM if job_input.openai_route else vllm_engine
        engine_type = "OpenAIvLLM" if job_input.openai_route else "vLLM"
        logging.info("Using engine: %s", engine_type)

        # Generate results and log the start of the generation process
        logging.info("Starting to generate results.")
        results_generator = engine.generate(job_input)

        async for batch in results_generator:
            logging.info("Yielding batch: %s", batch)  # Log each yielded batch
            yield batch

        logging.info("Finished processing job: %s", job)  # Log completion of job processing

    except Exception as e:
        logging.exception("Error in handler: %s", e)  # Log the error with traceback
        raise

def start_handler():
    # Ensure an event loop exists before the worker starts, so downstream
    # asyncio.get_event_loop() calls have one to attach to
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    runpod.serverless.start(
        {
            "handler": handler,
            "concurrency_modifier": lambda x: 300,
            "return_aggregate_stream": True,
        }
    )

if __name__ == "__main__":
    start_handler()
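
# ---------------------------------------------------------------------------
# Illustrative local smoke test (defined only, never called in production).
# The payload shape is an assumption: RunPod's {"input": {...}} job format
# with an OpenAI-style chat body routed via openai_route; the exact fields
# JobInput accepts depend on its implementation, and actually running this
# requires a working vLLM installation plus a downloadable model.
# ---------------------------------------------------------------------------
async def _example_local_test():
    example_job = {
        "id": "local-test",
        "input": {
            "openai_route": "/v1/chat/completions",  # hypothetical route value
            "openai_input": {
                "model": "facebook/opt-125m",  # hypothetical small test model
                "messages": [{"role": "user", "content": "Hello!"}],
            },
        },
    }
    async for batch in handler(example_job):
        logging.info("Local test batch: %s", batch)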