FROM python:3.10

# Set the working directory
WORKDIR /python-docker

# Copy the requirements file
COPY requirements.txt requirements.txt

# Install system dependencies (git and ffmpeg)
RUN apt-get update && apt-get install -y git ffmpeg

# Install Python dependencies without caching
RUN pip install --no-cache-dir -r requirements.txt

# Create a cache directory for Hugging Face and set permissions
RUN mkdir -p /python-docker/.cache/huggingface && \
    chown -R 1000:1000 /python-docker/.cache

# Point TRANSFORMERS_CACHE at the writable cache directory
ENV TRANSFORMERS_CACHE=/python-docker/.cache/huggingface

# Pre-download the Whisper model at build time so it is available at runtime
RUN python -c "from transformers import pipeline; pipeline('automatic-speech-recognition', model='openai/whisper-small')"

# Copy the entire application with appropriate permissions
COPY --chown=1000:1000 . .

# Expose the port the app runs on
EXPOSE 8001

# Set the command to run the application
CMD ["uvicorn", "fastapi_app:app", "--host", "0.0.0.0", "--port", "8001"]
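
# Example usage (a sketch; the image tag "whisper-api" is an assumption, not defined by this repo):
#   docker build -t whisper-api .
#   docker run -p 8001:8001 whisper-api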