""" | |
Background worker for the AIBOM Generator. | |
This module provides a background worker that can be used to process | |
AIBOM generation tasks asynchronously. | |
""" | |
import logging
import os
import time
from typing import Any, Dict, List, Optional

from aibom_generator.generator import AIBOMGenerator
from aibom_generator.utils import calculate_completeness_score, setup_logging

# Set up logging
setup_logging()
logger = logging.getLogger(__name__)
class Worker:
    """Background worker for AIBOM generation.

    Polls for pending AIBOM generation tasks at a fixed interval and
    processes them with an ``AIBOMGenerator`` instance.
    """

    def __init__(
        self,
        poll_interval: int = 60,
        hf_token: Optional[str] = None,
        inference_model_url: Optional[str] = None,
        use_inference: bool = True,
        cache_dir: Optional[str] = None,
    ):
        """
        Initialize the worker.

        Args:
            poll_interval: Interval in seconds to poll for new tasks.
            hf_token: Hugging Face API token.
            inference_model_url: URL of the inference model service.
            use_inference: Whether to use the inference model.
            cache_dir: Directory to cache API responses and model cards.
        """
        self.poll_interval = poll_interval
        self.generator = AIBOMGenerator(
            hf_token=hf_token,
            inference_model_url=inference_model_url,
            use_inference=use_inference,
            cache_dir=cache_dir,
        )
        # Flag checked by the polling loop; cleared by stop() or on shutdown.
        self.running = False

    def start(self):
        """Run the polling loop until stopped or interrupted."""
        self.running = True
        logger.info("Worker started")
        try:
            while self.running:
                # Process tasks, then sleep for the poll interval.  Note a
                # stop() issued mid-sleep takes up to poll_interval seconds
                # to be observed.
                self._process_tasks()
                time.sleep(self.poll_interval)
        except KeyboardInterrupt:
            logger.info("Worker stopped by user")
        except Exception:
            # logger.exception records the full traceback, unlike the bare
            # message that logger.error(f"...{e}") would emit.
            logger.exception("Worker error")
        finally:
            self.running = False
            logger.info("Worker stopped")

    def stop(self):
        """Signal the polling loop to exit after its current iteration."""
        self.running = False

    def _process_tasks(self):
        """Process pending tasks.

        Placeholder: a real implementation would fetch tasks from a queue
        or database and generate AIBOMs for them.
        """
        logger.debug("Processing tasks")
def main():
    """Main entry point for the worker."""
    # Pull all configuration from the environment, then hand it to a
    # Worker instance and enter its polling loop.
    env = os.environ
    inference_enabled = env.get("AIBOM_USE_INFERENCE", "true").lower() == "true"
    worker = Worker(
        poll_interval=int(env.get("AIBOM_POLL_INTERVAL", 60)),
        hf_token=env.get("HF_TOKEN"),
        inference_model_url=env.get("AIBOM_INFERENCE_URL"),
        use_inference=inference_enabled,
        cache_dir=env.get("AIBOM_CACHE_DIR"),
    )
    worker.start()


if __name__ == "__main__":
    main()