seawolf2357 committed
Commit 9c3d630 · verified · Parent: 4fd0739

Update app.py

Files changed (1): app.py (+11, -21)
app.py CHANGED
@@ -9,7 +9,7 @@ from translatepy import Translator
 
 translator = Translator()
 
-# Constants
+# 상수 정의 (constants)
 model = "Corcelio/mobius"
 vae_model = "madebyollin/sdxl-vae-fp16-fix"
 
@@ -29,20 +29,19 @@ JS = """function () {
     }
 }"""
 
-# Load VAE component
+# VAE 컴포넌트 로드 (load the VAE component)
 vae = AutoencoderKL.from_pretrained(
     vae_model,
     torch_dtype=torch.float16
 )
 
-# Ensure model and scheduler are initialized in GPU-enabled function
+# GPU 사용 가능한 경우 모델 및 스케줄러 초기화 (initialize model and scheduler when a GPU is available)
 if torch.cuda.is_available():
     pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16).to("cuda")
 
 pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-
-# Function
+# 함수 정의 (define the function)
 @spaces.GPU()
 def generate_image(
     prompt,
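Note (editor's annotation, not part of the commit): as rendered in this hunk, pipe is created only when CUDA is available, yet the scheduler assignment that references it appears unguarded; whether that line actually sits inside the if-block is not visible in the diff. A minimal sketch of an equivalent setup that keeps pipe defined on CPU-only machines as well, where the device and dtype selection is my assumption:

import torch
from diffusers import (
    AutoencoderKL,
    KDPM2AncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

# Pick device/dtype up front so the pipeline exists either way.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "Corcelio/mobius", vae=vae, torch_dtype=dtype
).to(device)
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)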
@@ -50,23 +49,21 @@ def generate_image(
     width=1024,
     height=1024,
     scale=1.5,
-    steps=30,
-    clip=3):
+    steps=30):
 
     prompt = str(translator.translate(prompt, 'English'))
 
     print(f'prompt:{prompt}')
 
-    image = pipe(
+    images = pipe(
         prompt,
         negative_prompt=negative,
         width=width,
         height=height,
         guidance_scale=scale,
         num_inference_steps=steps,
-        clip_skip=clip,
-    )
-    return image.images[0], image.images[1]  # Return two images
+    ).images
+    return images[0], images[1]  # 두 이미지를 반환 (return the two images)
 
 
 examples = [
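Note (editor's annotation): the rewritten return statement unpacks two images from a single call. In diffusers, the .images list returned by StableDiffusionXLPipeline holds num_images_per_prompt entries per prompt (default 1), and that argument does not appear in this hunk; unless it is set to 2 somewhere else in app.py, images[1] would raise an IndexError. A hedged sketch of the call with the argument made explicit:

images = pipe(
    prompt,
    negative_prompt=negative,
    width=width,
    height=height,
    guidance_scale=scale,
    num_inference_steps=steps,
    num_images_per_prompt=2,  # assumption: guarantees images[0] and images[1] both exist
).images
return images[0], images[1]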
@@ -83,7 +80,7 @@ examples = [
 ]
 
 
-# Gradio Interface
+# Gradio 인터페이스 (Gradio interface)
 
 with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
     gr.HTML("<h1><center>나만의 모델 캐릭터 생성</center></h1>")
@@ -126,13 +123,6 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
         step=1,
         value=50,
     )
-    clip = gr.Slider(
-        label="Clip Skip",
-        minimum=1,
-        maximum=10,
-        step=1,
-        value=3,
-    )
     gr.Examples(
         examples=examples,
         inputs=prompt,
@@ -142,11 +132,11 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
     )
 
     prompt.submit(fn=generate_image,
-                  inputs=[prompt, negative, width, height, scale, steps, clip],
+                  inputs=[prompt, negative, width, height, scale, steps],
                   outputs=[img1, img2],
                   )
     submit.click(fn=generate_image,
-                 inputs=[prompt, negative, width, height, scale, steps, clip],
+                 inputs=[prompt, negative, width, height, scale, steps],
                  outputs=[img1, img2],
                  )
 
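Note (editor's annotation): the prompt path kept by this commit runs every user prompt through translatepy before inference, and the page title added with gr.HTML, "나만의 모델 캐릭터 생성", means "create your own model character". A minimal standalone sketch of the translation step as the app uses it, with the Korean input string being purely illustrative:

from translatepy import Translator

translator = Translator()

# translate() auto-detects the source language; str() on the result object
# yields the translated text, which is exactly how app.py consumes it.
prompt = str(translator.translate("우주를 나는 고양이", "English"))
print(prompt)  # expected: something like "A cat flying in space"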