name: Fast GPU Tests on PR

on:
  pull_request:
    branches: main
    paths:
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
- "src/diffusers/pipeline_loading_utils.py" | |
- "src/diffusers/loaders/lora_base.py" | |
- "src/diffusers/loaders/lora_pipeline.py" | |
- "src/diffusers/loaders/peft.py" | |
- "tests/pipelines/test_pipelines_common.py" | |
- "tests/models/test_modeling_common.py" | |
workflow_dispatch: | |
concurrency: | |
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} | |
cancel-in-progress: true | |
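
# The concurrency group above keys on the workflow name plus the PR head ref, so a new
# push to the same PR cancels any in-flight run of this workflow; manually dispatched
# runs fall back to the unique run_id and are never grouped together.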

env:
  DIFFUSERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run
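
# The two CPU-only lint jobs below gate everything else: every GPU job in this workflow
# depends on them, either directly via `needs:` or indirectly through
# setup_torch_cuda_pipeline_matrix, so no GPU runner is reserved until the code quality
# and repo consistency checks have passed.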
jobs:
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY

  check_repository_consistency:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          python utils/check_support_list.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY

  setup_torch_cuda_pipeline_matrix:
    needs: [check_code_quality, check_repository_consistency]
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-pipelines.json
          path: reports

  torch_pipelines_cuda_tests:
    name: Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
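    # This job fans out into one runner per entry in the pipeline matrix produced above,
    # with at most 8 GPU jobs in flight at a time; fail-fast is off so one failing
    # pipeline module does not cancel the others.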
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type pipeline)
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT
      - name: PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          if [ "${{ matrix.module }}" = "ip_adapters" ]; then
            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          else
            pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx and $pattern" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          fi
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
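      # upload-artifact@v4 requires artifact names to be unique within a run, hence the
      # ${{ matrix.module }} component in the artifact name below.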
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports

  torch_cuda_tests:
    name: Torch CUDA Tests
    needs: [check_code_quality, check_repository_consistency]
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        module: [models, schedulers, lora, others]
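    # Unlike the pipeline job above, this matrix is static: four test groups, run at most
    # two at a time on the GPU pool.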
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type ${{ matrix.module }})
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT
      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
          if [ -z "$pattern" ]; then
            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          else
            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          fi
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_cuda_${{ matrix.module }}_stats.txt
          cat reports/tests_torch_cuda_${{ matrix.module }}_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_test_reports_${{ matrix.module }}
          path: reports

  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu
    needs: [check_code_quality, check_repository_consistency]
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
          python -m uv pip install -e [quality,test,training]
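      # Each `run:` step starts a fresh shell, so the PATH export above does not persist;
      # the steps below re-create and re-activate the same /opt/venv before using it.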
      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/examples_torch_cuda_stats.txt
          cat reports/examples_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: examples_test_reports
          path: reports