chansung committed on
Commit
64444f4
·
1 Parent(s): 78906e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -10
app.py CHANGED
@@ -11,7 +11,7 @@ import gradio as gr
11
  from PIL import Image
12
  from datasets import load_dataset
13
  from datasets import DownloadMode, VerificationMode
14
- from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
15
 
16
  from pingpong import PingPong
17
  from pingpong.pingpong import PPManager
@@ -122,15 +122,13 @@ class GradioLLaMA2ChatPPManager(LLaMA2ChatPPManager):
122
  TOKEN = os.getenv('HF_TOKEN')
123
  MODEL_ID = 'meta-llama/Llama-2-70b-chat-hf'
124
 
125
- pipe = DiffusionPipeline.from_pretrained(
126
- "stabilityai/stable-diffusion-2-base",
127
- torch_dtype=torch.float16,
128
- revision="fp16"
 
129
  )
130
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(
131
- pipe.scheduler.config
132
- )
133
- pipe = pipe.to("cuda")
134
 
135
  STYLES = """
136
  .margin-auto {
@@ -301,7 +299,7 @@ def gen_art(editor, cover_art_image, gen_cover_art_prompt):
301
  pipe = pipe.to("cuda")
302
 
303
  return [
304
- pipe(gen_cover_art_prompt).images[0],
305
  gen_cover_art_prompt
306
  ]
307
 
 
11
  from PIL import Image
12
  from datasets import load_dataset
13
  from datasets import DownloadMode, VerificationMode
14
+ from diffusers import StableDiffusionXLPipeline
15
 
16
  from pingpong import PingPong
17
  from pingpong.pingpong import PPManager
 
122
  TOKEN = os.getenv('HF_TOKEN')
123
  MODEL_ID = 'meta-llama/Llama-2-70b-chat-hf'
124
 
125
+ pipe = StableDiffusionXLPipeline.from_pretrained(
126
+ "stabilityai/stable-diffusion-xl-base-1.0",
127
+ torch_dtype=torch.float16,
128
+ variant="fp16",
129
+ use_safetensors=True
130
  )
131
+ pipe.to("cuda")
 
 
 
132
 
133
  STYLES = """
134
  .margin-auto {
 
299
  pipe = pipe.to("cuda")
300
 
301
  return [
302
+ pipe(gen_cover_art_prompt, num_inference_steps=25).images[0].resize((512, 512)),
303
  gen_cover_art_prompt
304
  ]
305