# NOTE: the three lines that were here ("File size: 1,576 Bytes", commit hash
# "1bcef92", and a line-number gutter) were artifacts from a web file viewer,
# not Dockerfile content. They are removed/commented so `docker build` parses.
# syntax=docker/dockerfile:1
# Use a recent, slim Python base image.
# (For full reproducibility in production, pin by digest: python:3.10-slim@sha256:…)
FROM python:3.10-slim

# Set the working directory in the container (created automatically)
WORKDIR /app

# Prevent Python from writing .pyc files to disc, and ensure output is sent
# straight to the terminal (useful for container logs).
# key=value form is used; the legacy space-separated `ENV key value` is deprecated.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Upgrade pip (--no-cache-dir keeps the pip wheel cache out of this layer)
RUN python -m pip install --no-cache-dir --upgrade pip

# Copy ONLY the requirements file first so the dependency layer below stays
# cached until requirements.txt itself changes (source edits won't reinstall deps).
COPY requirements.txt .

# Install dependencies
# --no-cache-dir reduces image size
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code into the container
# This includes the API file and the core logic directory.
# NOTE: .env is deliberately NOT copied. Copying it would bake secrets into an
# image layer permanently (visible via `docker history` / layer extraction),
# contradicting the secrets note below. Provide secrets at runtime instead
# (docker run -e / --env-file, or a compose file).
COPY api.py .
COPY ./kig_core ./kig_core

# Drop root: everything above that needs root (package install) is done.
# A stable numeric UID helps runtimes that verify runAsNonRoot.
RUN useradd --system --uid 10001 --create-home appuser
USER appuser

# Documentation only (does not publish the port) — the service listens on 7860
EXPOSE 7860

# Command to run the Uvicorn server
# It will look for an object named 'app' in the 'api.py' file.
# Listens on all interfaces (0.0.0.0) on port 7860, matching EXPOSE above.
# Exec (JSON-array) form keeps uvicorn as PID 1 so it receives SIGTERM from
# `docker stop` directly.
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]

# --- Notes ---
# Environment Variables:
# This Dockerfile assumes you will provide necessary environment variables
# (NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY, OPENAI_API_KEY, etc.)
# when running the container, for example using 'docker run -e VAR=value ...',
# 'docker run --env-file .env ...', or a docker-compose.yml file.
# DO NOT hardcode secrets directly in the Dockerfile or COPY a .env into the image.
#
# Cache Folders:
# Removed HF_HOME/TORCH_HOME as this app primarily uses external APIs (Gemini/OpenAI)
# and Neo4j, not local Hugging Face/PyTorch models needing specific cache dirs.