DDPM training example
pipeline_glide.py (+3 -4)
@@ -24,6 +24,9 @@ import torch.utils.checkpoint
 from torch import nn

 import tqdm
+from diffusers.models import GLIDESuperResUNetModel, GLIDETextToImageUNetModel
+from diffusers.pipeline_utils import DiffusionPipeline
+from diffusers.schedulers import ClassifierFreeGuidanceScheduler, DDIMScheduler
 from transformers import CLIPConfig, CLIPModel, CLIPTextConfig, CLIPVisionConfig, GPT2Tokenizer
 from transformers.activations import ACT2FN
 from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
@@ -36,10 +39,6 @@ from transformers.utils import (
     replace_return_docstrings,
 )

-from diffusers.models import GLIDESuperResUNetModel, GLIDETextToImageUNetModel
-from diffusers.pipeline_utils import DiffusionPipeline
-from diffusers.schedulers import ClassifierFreeGuidanceScheduler, DDIMScheduler
-

 #####################
 # START OF THE CLIP MODEL COPY-PASTE (with a modified attention module)
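The commit title refers to a DDPM training example. For orientation, below is a minimal, self-contained sketch of the DDPM training objective in plain PyTorch (noise the data, then regress the added noise with an MSE loss). The `model` and its `(noisy_images, timesteps)` call signature are hypothetical stand-ins for any epsilon-prediction UNet; this is not the repo's actual training script or the API of the pipeline edited above.

```python
# Minimal DDPM training sketch (assumed toy setup, not the repo's example).
import torch
import torch.nn.functional as F

T = 1000  # number of diffusion steps
betas = torch.linspace(1e-4, 0.02, T)               # linear noise schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # cumulative product, alpha-bar_t

def ddpm_loss(model, clean_images):
    """One DDPM training step: corrupt the batch and predict the noise."""
    b = clean_images.shape[0]
    t = torch.randint(0, T, (b,), device=clean_images.device)
    noise = torch.randn_like(clean_images)
    a_bar = alphas_cumprod.to(clean_images.device)[t].view(b, 1, 1, 1)
    # Forward process: x_t = sqrt(a_bar) * x_0 + sqrt(1 - a_bar) * eps
    noisy = a_bar.sqrt() * clean_images + (1.0 - a_bar).sqrt() * noise
    # `model` is any eps-prediction network: model(x_t, t) -> eps_hat
    return F.mse_loss(model(noisy, t), noise)
```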