Upload folder using huggingface_hub
- __pycache__/app.cpython-310.pyc +0 -0
- app.py +4 -3
__pycache__/app.cpython-310.pyc
CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
app.py
CHANGED
@@ -3,12 +3,13 @@ import gradio as gr
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 
-
+# Disable CUDA (GPU)
+torch.set_device('cpu')
 
 def generate_video(prompt):
     # load pipeline
-    pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
-    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+    pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
+    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
 
     # optimize for GPU memory
     pipe.enable_model_cpu_offload()
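Note on the change itself: `torch.set_device` is not a public PyTorch API (the closest existing calls are `torch.set_default_device` and `torch.cuda.set_device`), so the added line would raise an AttributeError as written. For reference, a minimal CPU-only sketch of `generate_video` follows; it assumes float32 weights (half precision has limited CPU kernel support), uses an illustrative `num_inference_steps=25` not taken from the commit, and depending on the installed diffusers version `pipe(...).frames` may need to be indexed as `frames[0]` before export.

import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

def generate_video(prompt):
    # load pipeline in float32: half precision is poorly supported on CPU
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float32
    )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

    # run everything on the CPU; enable_model_cpu_offload() only helps when a
    # CUDA device is available, so it is omitted in this CPU-only sketch
    pipe.to("cpu")

    # num_inference_steps=25 is an illustrative value, not from the commit
    video_frames = pipe(prompt, num_inference_steps=25).frames
    return export_to_video(video_frames)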