Commit 5695fa2
tmzh committed
Parent: b4535f2

flash_attn

Files changed:
- app.py (+4, -0)
- requirements.txt (+1, -2)
app.py
CHANGED
@@ -15,6 +15,10 @@ import requests
 from outlines import models, generate, samplers
 from pydantic import BaseModel
 
+# Install Flash attention
+import subprocess
+subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
 # Constants
 MAX_IMAGE_SIZE = (1024, 1024)
 TARGET_IMAGE_SIZE = 180_000
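
The added lines install flash-attn when the Space boots rather than at build time. With FLASH_ATTENTION_SKIP_CUDA_BUILD set to TRUE, flash-attn's build script skips compiling the CUDA kernels and downloads a prebuilt wheel matching the detected torch and CUDA versions, which keeps startup fast. A minimal sketch of how the installed package would then be used when loading a model; the checkpoint id is a placeholder, since the diff does not show the Space's actual model:

# Sketch only: enabling FlashAttention 2 via transformers' attn_implementation flag.
# "org/some-model" is a placeholder; the Space's real checkpoint is not in this diff.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "org/some-model",                          # placeholder model id
    torch_dtype=torch.bfloat16,                # flash-attn requires fp16/bf16 weights
    attn_implementation="flash_attention_2",   # use the kernels installed above
    device_map="auto",                         # relies on accelerate (in requirements.txt)
)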
requirements.txt
CHANGED
@@ -3,5 +3,4 @@ torch
 accelerate
 git+https://github.com/huggingface/transformers.git@main
 spaces
-torchvision
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+torchvision
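
Note: judging by its filename, the removed wheel was pinned to CUDA 11.8, torch 1.12, and CPython 3.10 (cu118torch1.12...cp310), so it would break whenever the Space's base image shipped a different torch or CUDA. Installing flash-attn at startup instead (see the app.py change above) lets the build script pick a prebuilt wheel that matches whatever torch version is actually installed.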