numpy==1.25.0
torch==2.4.0
transformers==4.46.2
pillow==10.3.0
# https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
# https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu11torch2.4cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
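# Optional: flash-attention can be installed from one of the prebuilt wheels above.
# Pick the wheel whose CUDA (cuXX), torch, and Python (cpXX) tags match your environment,
# then install it by passing the wheel URL directly to pip (example invocation, adjust the URL as needed):
#   pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu11torch2.4cxx11abiFALSE-cp311-cp311-linux_x86_64.whl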