anushka81
committed on
Commit
·
66a73ae
1
Parent(s):
2b30158
trained model import using gradio
Browse files- app.py +51 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
|
3 |
+
import torch
|
4 |
+
from PIL import Image
|
5 |
+
|
6 |
+
def text_to_image(prompt, negative_prompt, guidance_scale, num_inference_steps):
    """Generate an image from a text prompt with Stable Diffusion v1.5.

    Args:
        prompt: Text describing the desired image.
        negative_prompt: Text describing concepts to steer away from.
        guidance_scale: Classifier-free guidance strength (higher follows the
            prompt more closely).
        num_inference_steps: Number of denoising steps.

    Returns:
        The first generated PIL image.
    """
    # Load the pipeline once and cache it on the function object: the original
    # re-loaded the multi-GB model from disk on every button click, which made
    # each generation take minutes and risked exhausting GPU memory.
    if getattr(text_to_image, "_pipe", None) is None:
        text_to_image._pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to("cuda")
    result = text_to_image._pipe(
        prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )
    return result.images[0]
|
10 |
+
|
11 |
+
def image_to_image(prompt, negative_prompt, init_image, strength, guidance_scale, num_inference_steps):
    """Transform an uploaded image according to a text prompt (SD v1.5 img2img).

    Args:
        prompt: Text describing the desired modifications.
        negative_prompt: Text describing concepts to steer away from.
        init_image: Starting PIL image to be modified.
        strength: How strongly to deviate from the initial image (0-1).
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.

    Returns:
        The first generated PIL image.
    """
    # Cache the pipeline on the function object instead of re-loading the
    # multi-GB model on every call (same fix as text_to_image, but each
    # function caches its own pipeline so the edits stand alone).
    if getattr(image_to_image, "_pipe", None) is None:
        image_to_image._pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to("cuda")
    # SD v1.5 was trained at 512x512; normalizing the input keeps VRAM use
    # predictable and avoids odd-dimension errors in the VAE.
    prepared = init_image.convert("RGB").resize((512, 512))
    # Bug fix: diffusers renamed the img2img keyword `init_image` -> `image`
    # (deprecated in 0.12, removed by 0.13); with the pinned diffusers==0.18.2
    # the original `init_image=` call fails, so generation never worked.
    result = image_to_image._pipe(
        prompt,
        negative_prompt=negative_prompt,
        image=prepared,
        strength=strength,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )
    return result.images[0]
|
16 |
+
|
17 |
+
# Build the two-tab Gradio UI. Widget kwargs target the pinned gradio==3.35.2
# API (`source=`/`tool=` on gr.Image were removed in gradio 4.x), so keep them
# as-is unless the requirements pin changes.
with gr.Blocks(theme='Respair/Shiki@1.2.2') as demo:
    gr.Markdown("# Stable Diffusion: Text-to-Image and Image-to-Image")

    with gr.Tab("Text-to-Image"):
        with gr.Row():
            text_prompt = gr.Textbox(label="Prompt", placeholder="Enter your text here...")
            text_negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter what to avoid...")
        with gr.Row():
            # Defaults (7.5 guidance, 50 steps) match common SD v1.5 settings.
            guidance_scale = gr.Slider(1, 20, value=7.5, step=0.1, label="Guidance Scale")
            num_inference_steps = gr.Slider(10, 100, value=50, step=1, label="Inference Steps")
        with gr.Row():
            generate_btn = gr.Button("Generate", elem_classes=["primary-button"])
        with gr.Row():
            text_output = gr.Image(label="Generated Image")

        # Input order must match text_to_image's positional parameters.
        generate_btn.click(text_to_image, inputs=[text_prompt, text_negative_prompt, guidance_scale, num_inference_steps], outputs=text_output)

    with gr.Tab("Image-to-Image"):
        with gr.Row():
            # type="pil" hands the handler a PIL.Image, which image_to_image
            # expects (it calls .convert/.resize on it).
            init_image = gr.Image(source="upload", tool="editor", type="pil", label="Initial Image")
        with gr.Row():
            img_prompt = gr.Textbox(label="Prompt", placeholder="Describe modifications...")
            img_negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter what to avoid...")
        with gr.Row():
            # strength controls how far the result may drift from the upload.
            strength = gr.Slider(0.1, 1.0, value=0.75, step=0.05, label="Strength")
            img_guidance_scale = gr.Slider(1, 20, value=7.5, step=0.1, label="Guidance Scale")
            img_num_inference_steps = gr.Slider(10, 100, value=50, step=1, label="Inference Steps")
        with gr.Row():
            img_generate_btn = gr.Button("Generate", elem_classes=["primary-button"])
        with gr.Row():
            img_output = gr.Image(label="Modified Image")

        # Input order must match image_to_image's positional parameters.
        img_generate_btn.click(image_to_image, inputs=[img_prompt, img_negative_prompt, init_image, strength, img_guidance_scale, img_num_inference_steps], outputs=img_output)

# NOTE(review): launched unconditionally at import time (no __main__ guard);
# share=True also opens a public tunnel — confirm that is intended when
# deploying somewhere that already serves the app (e.g. HF Spaces ignores it).
demo.launch(share=True)
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
torch==2.0.1
|
2 |
+
diffusers==0.18.2
|
3 |
+
gradio==3.35.2
|
4 |
+
Pillow==9.5.0
|