JoPmt committed on
Commit
2f94b61
·
1 Parent(s): 9511176

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -13
app.py CHANGED
@@ -1,22 +1,16 @@
1
  from PIL import Image
2
  import gradio as gr
3
- import numpy as np
4
- import random, os, gc, base64, io
5
- import cv2
6
  import torch
7
  from accelerate import Accelerator
8
- from transformers import pipeline, AutoModel, CLIPTextModel, CLIPTokenizer
9
  from diffusers.utils import load_image
10
- from diffusers import EulerDiscreteScheduler, UNet2DConditionModel, AutoencoderKL, DiffusionPipeline
11
- from gradio_client import Client
12
 
13
  accelerator = Accelerator(cpu=True)
14
  pipe = accelerator.prepare(DiffusionPipeline.from_pretrained("stabilityai/sd-turbo", torch_dtype=torch.float32, use_safetensors=True, safety_checker=None))
15
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
16
- ##pipe.text_encoder = CLIPTextModel._from_config(pipe.text_encoder.config)
17
- ##pipe.tokenizer = CLIPTokenizer.from_config(pipe.tokenizer.config)
18
- ##pipe.UNet2DConditionModel = UNet2DConditionModel.from_config("stabilityai/sd-turbo", subfolder="unet")
19
- ##pipe.AutoencoderKL = AutoencoderKL.from_config("stabilityai/sd-turbo", subfolder="vae")
20
  pipe = accelerator.prepare(pipe.to("cpu"))
21
  generator = torch.Generator("cpu").manual_seed(random.randint(1, 867346))
22
  apol=[]
@@ -24,10 +18,11 @@ apol=[]
24
  def plex(prompt):
25
  gc.collect()
26
  apol=[]
27
- imags = pipe(prompt=prompt,negative_prompt="bad quality",num_inference_steps=5,width=512,height=512,generator=generator).images[0]
28
- apol.append(imags)
 
29
  return apol
30
 
31
- iface = gr.Interface(fn=plex,inputs=gr.Textbox(), outputs=gr.Gallery(columns=2), title="Img2Img_SkyV22CntrlNet_CPU", description="Running on CPU, very slow!")
32
  iface.queue(max_size=1)
33
  iface.launch(max_threads=1)
 
1
  from PIL import Image
2
  import gradio as gr
3
+ import random, os, gc
 
 
4
  import torch
5
  from accelerate import Accelerator
6
+ from transformers import pipeline
7
  from diffusers.utils import load_image
8
+ from diffusers import EulerDiscreteScheduler, DiffusionPipeline
 
9
 
10
  accelerator = Accelerator(cpu=True)
11
  pipe = accelerator.prepare(DiffusionPipeline.from_pretrained("stabilityai/sd-turbo", torch_dtype=torch.float32, use_safetensors=True, safety_checker=None))
12
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
13
+ pipe.unet.to(memory_format=torch.channels_last)
 
 
 
14
  pipe = accelerator.prepare(pipe.to("cpu"))
15
  generator = torch.Generator("cpu").manual_seed(random.randint(1, 867346))
16
  apol=[]
 
18
def plex(prompt):
    """Generate two 512x512 SD-Turbo images for *prompt* and return them as a list.

    The prompt (and the fixed "bad quality" negative prompt) is duplicated so the
    pipeline produces a batch of two images in one call.
    """
    gc.collect()  # free memory from the previous request before a new generation
    apol = []
    out = pipe(prompt=[prompt] * 2,
               negative_prompt=["bad quality"] * 2,
               num_inference_steps=5,
               width=512,
               height=512,
               generator=generator)
    # BUG FIX: the original iterated `imas["images"]` while the pipeline result was
    # bound to `imags`, raising NameError on every call; the unused enumerate index
    # is dropped as well.
    for igs in out["images"]:
        apol.append(igs)
    return apol
25
 
26
# --- Gradio UI wiring ---
# Single textbox prompt in, two-column gallery of generated images out.
iface = gr.Interface(fn=plex,inputs=gr.Textbox(), outputs=gr.Gallery(columns=2), title="Stabilityai SD-Turbo CPU", description="Running on CPU, very slow!")
# Queue depth 1 and a single worker thread: the CPU pipeline cannot serve
# concurrent generations, so requests are strictly serialized.
iface.queue(max_size=1)
iface.launch(max_threads=1)