# Use a base image that supports the requirements of the TinyLlama model.
# The text-generation-inference image bundles the server and its dependencies;
# a plain python:3.9-slim base cannot work here, because CMD runs *inside* the
# container, where no `docker run` is available, so the model-serving image
# itself is used as the base.
FROM ghcr.io/huggingface/text-generation-inference:1.3

# Set the working directory in the container
WORKDIR /app

# Install additional dependencies (if any)
# RUN pip install <package>

# Copy required files or directories from the local file system to the container
# COPY <src> <dest>

# Set environment variables if needed
# ENV <key>=<value>

# Expose the port the server listens on inside the container
EXPOSE 80

# Note: --shm-size is a `docker run` parameter and cannot be set directly in
# the Dockerfile; pass it when starting the container (see the example below).

# Set the volume for data persistence
VOLUME ["/data"]

# The base image's entrypoint is the text-generation-launcher, so CMD only
# supplies its arguments: the model to serve and the quantization mode
CMD ["--model-id", "TinyLlama/TinyLlama-1.1B-Chat-v0.4", "--quantize", "bitsandbytes-fp4"]
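
# --- Example usage (a sketch; the image tag "tinyllama-tgi" is illustrative) ---
# Build the image:
#   docker build -t tinyllama-tgi .
# Run it, supplying the parameters that cannot live in the Dockerfile:
# --shm-size for shared memory, -p to map host port 8080 to the server's
# port 80, and -v to mount a host directory onto the /data volume:
#   docker run --shm-size 1g -p 8080:80 -v /data:/data tinyllama-tgi
# Once the server is up, it can be queried over HTTP, e.g.:
#   curl http://localhost:8080/generate -X POST \
#     -H 'Content-Type: application/json' \
#     -d '{"inputs": "Hello", "parameters": {"max_new_tokens": 20}}'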