File size: 1,344 Bytes
917a5e4
 
 
b1b6cd8
 
 
917a5e4
b1b6cd8
917a5e4
b1b6cd8
4dab6d1
b1b6cd8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
917a5e4
b1b6cd8
4dab6d1
b1b6cd8
917a5e4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
#!/bin/bash
# Container entrypoint for the AIBOM Generator.
#
# Dispatches on the first argument:
#   server    - start the FastAPI server via uvicorn (recommended for HF Spaces)
#   worker    - start the background worker process
#   inference - start the inference model server
#   <other>   - forward all arguments to the CLI module
# With no arguments, defaults to starting the API server (web UI mode).
#
# Environment:
#   AIBOM_INFERENCE_URL - inference service endpoint; defaults to the local
#                         inference model service if unset
#   PORT                - listen port (default 7860 for the API server,
#                         8000 for the inference server)
#
# -u catches typoed/unset variables; pipefail surfaces mid-pipeline failures.
set -euo pipefail

# Default inference URL for internal inference model service
DEFAULT_INFERENCE_URL="http://localhost:8000/extract"
export AIBOM_INFERENCE_URL="${AIBOM_INFERENCE_URL:-$DEFAULT_INFERENCE_URL}"

echo "Using AIBOM_INFERENCE_URL: $AIBOM_INFERENCE_URL"

# Check if command-line arguments are provided.
# ${1:-} keeps 'set -u' from aborting when the script is run with no args.
if [ -n "${1:-}" ]; then
  case "$1" in
    server)
      # Start the API server explicitly (recommended for Hugging Face Spaces)
      echo "Starting AIBOM Generator API server..."
      # Quote the port expansion so a malformed PORT cannot word-split argv.
      exec uvicorn src.aibom_generator.api:app --host 0.0.0.0 --port "${PORT:-7860}"
      ;;
    worker)
      # Start the background worker
      echo "Starting AIBOM Generator background worker..."
      exec python -m src.aibom_generator.worker
      ;;
    inference)
      # Start the inference model server
      echo "Starting AIBOM Generator inference model server..."
      exec python -m src.aibom_generator.inference_model --host 0.0.0.0 --port "${PORT:-8000}"
      ;;
    *)
      # Run as CLI with provided arguments
      echo "Running AIBOM Generator CLI..."
      exec python -m src.aibom_generator.cli "$@"
      ;;
  esac
else
  # Default behavior (if no arguments): start API server (web UI mode)
  echo "Starting AIBOM Generator API server (web UI)..."
  exec uvicorn src.aibom_generator.api:app --host 0.0.0.0 --port "${PORT:-7860}"
fi