# Start from the TGI base image
FROM ghcr.io/huggingface/text-generation-inference:1.3 as base

# Install JupyterLab
# Note: Ensure Python is installed in the base image. If not, you will need to install it.
RUN pip install jupyterlab jupyterlab-vim==0.15.1 jupyterlab-vimrc

# Copy any necessary files (if needed)
# COPY your-files /your-destination

# AWS SageMaker compatible image
# Assuming this part remains the same as your original Dockerfile
FROM base as sagemaker

COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]

# Final image
FROM base

# Add JupyterLab entrypoint
# Note: You can customize the command to suit your needs
ENTRYPOINT ["jupyter", "lab", "--ip=0.0.0.0", "--allow-root", "--NotebookApp.token=''", "--port=7860"]

# Optional: Set CMD to launch TGI or any other command
# CMD ["text-generation-launcher", "--json-output"]
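
# Usage sketch (illustrative only; the image tags "tgi-jupyter" and "tgi-sagemaker"
# below are assumptions, not defined anywhere in this file):
#   docker build -t tgi-jupyter .                        # builds the final JupyterLab image
#   docker build --target sagemaker -t tgi-sagemaker .   # builds the SageMaker-compatible stage instead
#   docker run --gpus all -p 7860:7860 tgi-jupyter       # JupyterLab reachable at http://localhost:7860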