amirkhanbloch committed
Commit 9530fd2 · verified
1 Parent(s): c51e0da

Update app.py

Files changed (1)
  1. app.py +31 -136
app.py CHANGED
@@ -1,137 +1,32 @@
-import gradio as gr
-import spaces
-import numpy as np
-import random
 import torch
-from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-dtype = torch.float16
-
-repo = "stabilityai/stable-diffusion-3-medium-diffusers"
-pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16).to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1344
-
-@spaces.GPU
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
-
-    return image, seed
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css="""
-#col-container {
-    margin: 0 auto;
-    max-width: 580px;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""
-        # Demo [Stable Diffusion 3 Medium](https://huggingface.co/stabilityai/stable-diffusion-3-medium)
-        Learn more about the [Stable Diffusion 3 series](https://stability.ai/news/stable-diffusion-3). Try on [Stability AI API](https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1sd3/post), [Stable Assistant](https://stability.ai/stable-assistant), or on Discord via [Stable Artisan](https://stability.ai/stable-artisan). Run locally with [ComfyUI](https://github.com/comfyanonymous/ComfyUI) or [diffusers](https://github.com/huggingface/diffusers)
-        """)
-
-        with gr.Row():
-
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0)
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=64,
-                    value=1024,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=64,
-                    value=1024,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=5.0,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=28,
-                )
-
-        gr.Examples(
-            examples = examples,
-            inputs = [prompt]
-        )
-    gr.on(
-        triggers=[run_button.click, prompt.submit, negative_prompt.submit],
-        fn = infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs = [result, seed]
-    )
-
-demo.launch()
+import gradio as gr
+from diffusers import StableDiffusion3Pipeline
+
+def image_generator(prompt):
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    pipeline = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers",
+                                                        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                                                        text_encoder_3=None,
+                                                        tokenizer_3 = None)
+    #pipeline.enable_model_cpu_offload()
+    pipeline.to(device)
+
+    image = pipeline(
+        prompt=prompt,
+        negative_prompt="blurred, ugly, watermark, low, resolution, blurry",
+        num_inference_steps=40,
+        height=1024,
+        width=1024,
+        guidance_scale=9.0
+    ).images[0]
+
+    return image
+
+interface = gr.Interface(
+    fn=image_generator,
+    inputs=gr.Textbox(lines=2, placeholder = "Enter your prompt..."),
+    outputs=gr.Image(type = "pil"),
+    title = "Image Generator App",
+    description = "This is a simple image generator app using HuggingFace's Stable Diffusion 3 model.")
+interface.launch()
+print(interface)
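
Note: the updated app.py constructs the StableDiffusion3Pipeline inside image_generator, so the model weights are re-initialized on every request. Below is a minimal sketch, not part of this commit, of the same app with the pipeline loaded once at module scope and reused across calls; it assumes the same model repo and generation settings as the committed code.

import torch
import gradio as gr
from diffusers import StableDiffusion3Pipeline

# Load the pipeline once at startup instead of on every request.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    text_encoder_3=None,   # drop the T5 text encoder to reduce memory, as in the commit
    tokenizer_3=None,
).to(device)

def image_generator(prompt):
    # Reuse the already-loaded pipeline; only the prompt changes per call.
    return pipeline(
        prompt=prompt,
        negative_prompt="blurred, ugly, watermark, low, resolution, blurry",
        num_inference_steps=40,
        height=1024,
        width=1024,
        guidance_scale=9.0,
    ).images[0]

interface = gr.Interface(
    fn=image_generator,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
    outputs=gr.Image(type="pil"),
    title="Image Generator App",
    description="This is a simple image generator app using HuggingFace's Stable Diffusion 3 model.",
)
interface.launch()

Loading at startup trades a slower launch for much lower per-request latency, since from_pretrained no longer runs inside the Gradio handler.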