add xformers
requirements.txt CHANGED (+20 -24)
@@ -1,28 +1,7 @@
-# fastapi==0.115.6
-# starlette==0.41.3
-# uvicorn==0.22.0
-# python-multipart==0.0.6
-# gradio==3.32.0
-# # torch==2.0.1
-# torchvision==0.15.2
 
-#
-# accelerate==0.20.3
-# safetensors==0.3.1
-# pillow==9.5.0
-# numpy==1.24.3
-# packaging==23.1
-# pydantic==2.10.5
-# tqdm==4.65.0
-# typing-extensions==4.12.2
-# python-dotenv==1.0.0
-
-
-# # Optional but recommended for memory efficiency
+#recommended for memory efficiency
 # xformers==0.0.20
 
-# diffusers==0.18.2
-# huggingface_hub==0.27.0
 --extra-index-url https://download.pytorch.org/whl/cu113
 fastapi==0.115.6
 uvicorn==0.22.0
@@ -32,7 +11,24 @@ Pillow==9.4.0
 numpy==1.23.5
 gradio==5.12.0
 huggingface_hub
-torch==2.4.0
+# torch==2.4.0
 tokenizers==0.21.0
 starlette==0.41.3
-accelerate==1.3.0
+accelerate==1.3.0
+xformers==0.0.29.post1
+# xformers==0.0.20
+# Successfully installed
+nvidia-cublas-cu12==12.4.5.8
+nvidia-cuda-cupti-cu12==12.4.127
+nvidia-cuda-nvrtc-cu12==12.4.127
+nvidia-cuda-runtime-cu12==12.4.127
+nvidia-cufft-cu12==11.2.1.3
+nvidia-curand-cu12==10.3.5.147
+nvidia-cusolver-cu12==11.6.1.9
+nvidia-cusparse-cu12==12.3.1.170
+nvidia-nccl-cu12==2.21.5
+nvidia-nvjitlink-cu12==12.4.127
+nvidia-nvtx-cu12==12.4.127
+sympy==1.13.1
+torch==2.5.1
+triton==3.1.0
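
The substance of the commit is the xformers pin (0.0.29.post1), added for memory-efficient attention alongside the torch 2.5.1 / CUDA 12 wheels listed above. The Space's application code is not part of this diff, so the snippet below is only an illustrative smoke test of the new dependency (assuming a CUDA-capable runtime and the hypothetical shapes chosen here), not code from the repository:

    # Illustrative smoke test for the new pins; not part of this Space's code.
    import torch
    import xformers.ops as xops

    # Hypothetical shapes: batch=1, seq_len=1024, heads=8, head_dim=64.
    q = torch.randn(1, 1024, 8, 64, device="cuda", dtype=torch.float16)
    k = torch.randn(1, 1024, 8, 64, device="cuda", dtype=torch.float16)
    v = torch.randn(1, 1024, 8, 64, device="cuda", dtype=torch.float16)

    # Calls the fused memory-efficient attention kernel provided by xformers.
    out = xops.memory_efficient_attention(q, k, v)
    print(out.shape)  # torch.Size([1, 1024, 8, 64])

If the xformers build did not match the installed torch/CUDA wheels, this call is where it would typically fail, so it is a quick way to confirm that the pins in this file resolve to compatible binaries.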