"""
Background worker for the AIBOM Generator.

This module provides a background worker that can be used to process
AIBOM generation tasks asynchronously.
"""

import logging
import os
import time
from typing import Optional

from aibom_generator.generator import AIBOMGenerator
from aibom_generator.utils import setup_logging, calculate_completeness_score

# Set up logging
setup_logging()
logger = logging.getLogger(__name__)


class Worker:
    """
    Background worker for AIBOM generation.
    """
    
    def __init__(
        self,
        poll_interval: int = 60,
        hf_token: Optional[str] = None,
        inference_model_url: Optional[str] = None,
        use_inference: bool = True,
        cache_dir: Optional[str] = None,
    ):
        """
        Initialize the worker.
        
        Args:
            poll_interval: Interval in seconds to poll for new tasks
            hf_token: Hugging Face API token
            inference_model_url: URL of the inference model service
            use_inference: Whether to use the inference model
            cache_dir: Directory to cache API responses and model cards
        """
        self.poll_interval = poll_interval
        self.generator = AIBOMGenerator(
            hf_token=hf_token,
            inference_model_url=inference_model_url,
            use_inference=use_inference,
            cache_dir=cache_dir,
        )
        self.running = False
    
    def start(self):
        """Start the worker."""
        self.running = True
        logger.info("Worker started")
        
        try:
            while self.running:
                # Process tasks
                self._process_tasks()
                
                # Sleep for poll interval
                time.sleep(self.poll_interval)
        except KeyboardInterrupt:
            logger.info("Worker stopped by user")
        except Exception as e:
            logger.error(f"Worker error: {e}")
        finally:
            self.running = False
            logger.info("Worker stopped")
    
    def stop(self):
        """Stop the worker."""
        self.running = False
    
    def _process_tasks(self):
        """Process pending tasks."""
        # Placeholder: a real implementation would fetch pending tasks from a
        # queue or database and hand them to self.generator. See the
        # illustrative QueueWorker sketch below for one possible approach.
        logger.debug("Processing tasks")
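
# Illustrative sketch, not part of the original module: one way the placeholder
# Worker._process_tasks could be filled in, assuming tasks are Hugging Face
# model IDs held in an in-memory queue.Queue. The generate_aibom(model_id) call
# is an assumed generator API; adjust it to whatever AIBOMGenerator actually
# exposes. calculate_completeness_score (imported above) is likewise assumed to
# accept the generated document.
import queue  # would normally live with the other imports at the top


class QueueWorker(Worker):
    """Worker variant that drains an in-memory queue of model IDs."""

    def __init__(self, task_queue: "queue.Queue[str]", **kwargs):
        super().__init__(**kwargs)
        self.task_queue = task_queue

    def _process_tasks(self):
        """Generate one AIBOM per queued model ID, then return to polling."""
        while True:
            try:
                model_id = self.task_queue.get_nowait()
            except queue.Empty:
                return  # nothing pending; sleep until the next poll
            try:
                aibom = self.generator.generate_aibom(model_id)  # assumed API
                score = calculate_completeness_score(aibom)
                logger.info("Generated AIBOM for %s (completeness: %s)", model_id, score)
            except Exception as e:
                logger.error("Failed to generate AIBOM for %s: %s", model_id, e)
            finally:
                self.task_queue.task_done()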


def main():
    """Main entry point for the worker."""
    # Create and start the worker
    worker = Worker(
        poll_interval=int(os.environ.get("AIBOM_POLL_INTERVAL", 60)),
        hf_token=os.environ.get("HF_TOKEN"),
        inference_model_url=os.environ.get("AIBOM_INFERENCE_URL"),
        use_inference=os.environ.get("AIBOM_USE_INFERENCE", "true").lower() == "true",
        cache_dir=os.environ.get("AIBOM_CACHE_DIR"),
    )
    
    worker.start()


if __name__ == "__main__":
    main()
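

# Example invocation, assuming this module is packaged as aibom_generator.worker
# (matching the package name used in the imports above):
#
#   HF_TOKEN=<your-token> AIBOM_POLL_INTERVAL=30 python -m aibom_generator.worker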