Sergidev committed on
Commit bdb0ed5 · verified · 1 Parent(s): 5a31216

Update Dockerfile

Files changed (1)
  1. Dockerfile +31 -18
Dockerfile CHANGED
@@ -5,15 +5,13 @@ RUN apt-get update && apt-get install -y \
    git \
    python3.10 \
    python3-pip \
+   python-is-python3 \
    wget \
    ninja-build \
    gcc \
    g++ \
    && rm -rf /var/lib/apt/lists/*

- # Create symbolic link for python
- RUN ln -s /usr/bin/python3.10 /usr/bin/python
-
WORKDIR /app

# Install basic Python packages first
@@ -29,6 +27,11 @@ ENV CUDA_HOME=/usr/local/cuda
ENV PATH=${CUDA_HOME}/bin:${PATH}
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}

+ # Clone Self-Lengthen repository
+ RUN git clone https://github.com/QwenLM/Self-Lengthen.git && \
+     mv Self-Lengthen/* . && \
+     rm -rf Self-Lengthen
+
# Install dependencies in order
COPY requirements.txt .
RUN pip3 install --no-cache-dir \
@@ -56,11 +59,8 @@ RUN git clone -b self-lengthen https://github.com/quanshr/FastChat.git && \
# Install LLaMA Factory
RUN pip3 install --no-cache-dir llamafactory

- # Copy project structure
- COPY qwen /app/qwen
- COPY llama /app/llama
- COPY eval /app/eval
- COPY app.py /app/app.py
+ # Create directories for models and results
+ RUN mkdir -p models results

# Set environment variables
ENV CUDA_VISIBLE_DEVICES=0
@@ -68,20 +68,33 @@ ENV WORLD_SIZE=1
ENV RANK=0
ENV MASTER_ADDR=localhost
ENV MASTER_PORT=29500
-
- # Create directories
- RUN mkdir -p /app/qwen/results
+ ENV MODEL_PATH=/app/models/base_model
+ ENV INSTRUCT_COUNT=5000
+ ENV MAX_ITER=3

# Create startup script
RUN echo '#!/bin/bash\n\
- if [ "$USE_UI" = "true" ]; then\n\
- python app.py\n\
- else\n\
- cd /app/qwen\n\
- bash run.sh --base_model=$MODEL_PATH --instruct_count=$INSTRUCT_COUNT --max_iter=$MAX_ITER\n\
- python collect_data.py\n\
- fi' > /app/start.sh && \
+ # Download model if needed\n\
+ if [ ! -d "$MODEL_PATH" ]; then\n\
+ mkdir -p $MODEL_PATH\n\
+ git lfs install\n\
+ git clone https://huggingface.co/Qwen/Qwen2-7B-Instruct $MODEL_PATH\n\
+ fi\n\
+ \n\
+ # Run the training process\n\
+ cd /app/qwen\n\
+ bash run.sh --base_model=$MODEL_PATH --instruct_count=$INSTRUCT_COUNT --max_iter=$MAX_ITER\n\
+ python collect_data.py\n\
+ \n\
+ # Start the web interface\n\
+ python app.py\n' > /app/start.sh && \
    chmod +x /app/start.sh

+ # Create a simple web interface
+ COPY app.py .
+
+ # Expose port for web interface
+ EXPOSE 7860
+
# Command to run
ENTRYPOINT ["/app/start.sh"]
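
For reference, a minimal sketch of how the updated image could be built and run. The image tag, host volume path, and GPU flag below are illustrative assumptions and are not part of this commit; the ENV defaults (MODEL_PATH, INSTRUCT_COUNT, MAX_ITER) can simply be overridden with -e at run time.

# Build from the repository root (tag name is hypothetical)
docker build -t self-lengthen .

# Run with GPU access, persist downloaded models on the host, and publish the web UI port 7860
docker run --gpus all \
    -v "$(pwd)/models:/app/models" \
    -p 7860:7860 \
    -e INSTRUCT_COUNT=5000 \
    -e MAX_ITER=3 \
    self-lengthen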