Commit 4ceee35
Parent(s): fe612e9

Refactor image_tab function to update generator initialization with device parameter

tabs/images/handlers.py  CHANGED  (+2 -2)
@@ -213,7 +213,7 @@ def gen_img(request: BaseReq | BaseImg2ImgReq | BaseInpaintReq, progress=gr.Prog
     progress(0.3, "Getting Prompt Embeddings")
     # Get Prompt Embeddings
     if isinstance(pipeline, flux_pipes):
-        positive_prompt_embeds, positive_prompt_pooled = get_weighted_text_embeddings_flux1(pipeline, request.prompt)
+        positive_prompt_embeds, positive_prompt_pooled = get_weighted_text_embeddings_flux1(pipeline, request.prompt, device=device)
     elif isinstance(pipeline, sd_pipes):
         positive_prompt_embeds, negative_prompt_embeds, positive_prompt_pooled, negative_prompt_pooled = get_weighted_text_embeddings_sdxl(pipeline, request.prompt, request.negative_prompt)

@@ -227,7 +227,7 @@ def gen_img(request: BaseReq | BaseImg2ImgReq | BaseInpaintReq, progress=gr.Prog
         'num_images_per_prompt': request.num_images_per_prompt,
         'num_inference_steps': request.num_inference_steps,
         'guidance_scale': request.guidance_scale,
-        'generator': [torch.Generator().manual_seed(request.seed + i) if not request.seed is any([None, 0, -1]) else torch.Generator().manual_seed(random.randint(0, 2**32 - 1)) for i in range(request.num_images_per_prompt)],
+        'generator': [torch.Generator(device=device).manual_seed(request.seed + i) if not request.seed is any([None, 0, -1]) else torch.Generator(device=device).manual_seed(random.randint(0, 2**32 - 1)) for i in range(request.num_images_per_prompt)],
     }

     if isinstance(pipeline, sd_pipes):
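Below is a minimal, self-contained sketch (not part of the commit) of the per-image seeding pattern that the changed 'generator' line builds. It assumes `device` is the torch device the pipeline runs on, and the names `make_generators`, `seed`, and `num_images` are illustrative stand-ins for request.seed and request.num_images_per_prompt. The seed check is written as a plain membership test (`seed in (None, 0, -1)`), which appears to be the intent of the `is any([...])` expression in the diff.

import random
import torch

def make_generators(seed, num_images, device="cpu"):
    """Build one seeded torch.Generator per requested image on the target device."""
    generators = []
    for i in range(num_images):
        if seed in (None, 0, -1):
            # No usable seed supplied: draw a fresh random seed for each image.
            s = random.randint(0, 2**32 - 1)
        else:
            # Fixed seed: offset it per image so results are distinct but reproducible.
            s = seed + i
        # Creating the generator on the pipeline's device is what the commit adds;
        # sampling noise directly on CUDA with a CPU generator can raise a
        # device-mismatch error.
        generators.append(torch.Generator(device=device).manual_seed(s))
    return generators

Example usage: make_generators(42, 4, device="cuda") yields four CUDA generators seeded 42, 43, 44, 45, matching one generator per image in the pipeline call.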