services:
  backend:
    container_name: mindsearch-backend
    build:
      context: .
      dockerfile: backend.dockerfile
    image: mindsearch/backend:latest
    restart: unless-stopped
    # Uncomment the following line to force using the locally built image.
    # (The valid Compose service key is "pull_policy"; "pull" is not a
    # recognized service attribute.)
    # pull_policy: never
    ports:
      - "8002:8002"
    environment:
      # Stream Python stdout/stderr unbuffered so logs appear in real time.
      - PYTHONUNBUFFERED=1
      # - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - OPENAI_API_BASE=${OPENAI_API_BASE:-https://api.openai.com/v1}
      # - QWEN_API_KEY=${QWEN_API_KEY:-}
      # - SILICON_API_KEY=${SILICON_API_KEY:-}
    # NOTE(review): plain ${LANG} collides with the standard POSIX locale
    # variable, which is set on most hosts (e.g. "en_US.UTF-8") and would be
    # passed to --lang verbatim, shadowing the intended "cn" default.
    # MINDSEARCH_LANG takes precedence when set; LANG is kept as a fallback
    # for backward compatibility with existing usage. Nested defaults require
    # Docker Compose v2 interpolation.
    command: python -m mindsearch.app --lang ${MINDSEARCH_LANG:-${LANG:-cn}} --model_format ${MODEL_FORMAT:-internlm_server}
    volumes:
      # Share the host cache so model weights/downloads survive container rebuilds.
      - /root/.cache:/root/.cache
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    # GPU support explanation:
    # The current configuration has been tested with NVIDIA GPUs. If you use other types of GPUs, you may need to adjust the configuration.
    # For AMD GPUs you may be able to use a ROCm-enabled setup; note that
    # "driver: amd" below is illustrative only — confirm the correct device
    # driver name against your container runtime's documentation:
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: amd
    #           count: 1
    #           capabilities: [gpu]
    #
    # For other GPU types, you may need to consult the respective Docker GPU support documentation.
    # In theory, any GPU supported by PyTorch should be configurable here.
    # If you encounter issues, try the following steps:
    # 1. Ensure the correct GPU drivers are installed on the host
    # 2. Check if your Docker version supports your GPU type
    # 3. Install necessary GPU-related libraries in the Dockerfile
    # 4. Adjust the deploy configuration here to match your GPU type
    #
    # Note: After changing GPU configuration, you may need to rebuild the image.

  frontend:
    container_name: mindsearch-frontend
    build:
      context: .
      dockerfile: frontend.dockerfile
    image: mindsearch/frontend:latest
    restart: unless-stopped
    # Uncomment the following line to force using the locally built image.
    # (The valid Compose service key is "pull_policy"; "pull" is not a
    # recognized service attribute.)
    # pull_policy: never
    ports:
      - "8080:8080"
    depends_on:
      # Controls startup order only: Compose waits for the backend container
      # to start, not for the API on port 8002 to become ready (no
      # healthcheck/condition is configured).
      - backend