# Use an official Python runtime as a parent image
FROM python:3.8-slim

# Set the working directory to /app
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# Install wget (not included in the slim image) for downloading model weights
RUN apt-get update && \
    apt-get install -y --no-install-recommends wget && \
    rm -rf /var/lib/apt/lists/*

# Install any needed packages specified in requirements.txt,
# then pin transformers to a known-good version
RUN pip install --trusted-host pypi.python.org -r requirements.txt && \
    pip uninstall -y transformers && \
    pip install transformers==4.29.2

# Streamlit listens on port 8501 by default; make it available outside this container
EXPOSE 8501

# Set the TORTOISE_MODELS_DIR environment variable
ENV TORTOISE_MODELS_DIR tortoise/models/pretrained_models

# Create the directory for pretrained models
RUN mkdir -p $TORTOISE_MODELS_DIR

RUN echo "Downloading models through docker container..."

# Download all the models
RUN wget -O $TORTOISE_MODELS_DIR/autoregressive.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/autoregressive.pth && \
    wget -O $TORTOISE_MODELS_DIR/classifier.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/classifier.pth && \
    wget -O $TORTOISE_MODELS_DIR/clvp2.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/clvp2.pth && \
    wget -O $TORTOISE_MODELS_DIR/cvvp.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/cvvp.pth && \
    wget -O $TORTOISE_MODELS_DIR/diffusion_decoder.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/diffusion_decoder.pth && \
    wget -O $TORTOISE_MODELS_DIR/vocoder.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/vocoder.pth && \
    wget -O $TORTOISE_MODELS_DIR/rlg_auto.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_auto.pth && \
    wget -O $TORTOISE_MODELS_DIR/rlg_diffuser.pth https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_diffuser.pth && \
    wget -O $TORTOISE_MODELS_DIR/bigvgan_base_24khz_100band_g.pth https://drive.google.com/uc?id=1_cKskUDuvxQJUEBwdgjAxKuDTUW6kPdY && \
    wget -O $TORTOISE_MODELS_DIR/bigvgan_24khz_100band_g.pth https://drive.google.com/uc?id=1wmP_mAs7d00KHVfVEl8B5Gb72Kzpcavp

RUN echo "Finished downloading models through docker container..."

RUN echo "Current directory contents:"
RUN ls -la

# Run app.py when the container launches
CMD ["streamlit", "run", "app.py"]
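
# Example build/run invocation (illustrative sketch; the image tag "tortoise-tts"
# and the host-side port mapping are assumptions, adjust to your setup):
#   docker build -t tortoise-tts .
#   docker run --rm -p 8501:8501 tortoise-tts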