import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch
from PIL import Image, ImageOps

device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # Half-precision weights on the GPU; memory-efficient attention requires
    # the optional xformers package.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    # CPU fallback: full precision and considerably slower.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)
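
# Optional on memory-constrained GPUs: pipe.enable_attention_slicing() trades
# some speed for a lower peak-memory footprint.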

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# The click handler below only supplies the six prompt fields, so the remaining
# parameters need defaults. SDXL-Turbo is distilled to run with
# guidance_scale=0.0 and very few inference steps.
def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_part5,
          negative_prompt="", seed=0, randomize_seed=True,
          width=512, height=512, guidance_scale=0.0, num_inference_steps=2):
    front_prompt = f"front view of {prompt_part1} {color} colored plain {dress_type} with {front_design} design, {prompt_part5}"
    back_prompt = f"back view of {prompt_part1} {color} colored plain {dress_type} with {back_design} design, {prompt_part5}"
    
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
        
    # One generator seeded once: both passes draw from it in sequence, so the
    # front/back image pair is reproducible from a single seed.
    generator = torch.Generator().manual_seed(seed)
    
    front_image = pipe(
        prompt=front_prompt, 
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale, 
        num_inference_steps=num_inference_steps, 
        width=width, 
        height=height,
        generator=generator
    ).images[0]
    
    back_image = pipe(
        prompt=back_prompt, 
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale, 
        num_inference_steps=num_inference_steps, 
        width=width, 
        height=height,
        generator=generator
    ).images[0]
    
    return front_image, back_image
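
# A quick smoke test, commented out so the script stays non-interactive; the
# defaults above fill in everything past the six prompt fields:
# front, back = infer("a single", "red", "t-shirt", "yellow stripes",
#                     "polka dots", "hanging on the plain wall")
# front.save("front.png"); back.save("back.png")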

examples = [
    ["red", "t-shirt", "yellow stripes", "polka dots"],
    ["blue", "hoodie", "minimalist", "abstract art"],
    ["red", "sweat shirt", "geometric design", "plain"],
]

power_device = "GPU" if torch.cuda.is_available() else "CPU"

def edit_image(image, operation, *args):
    # `image` arrives as a PIL Image because gr.Image(type="pil") is used below,
    # so no Image.open() call is needed.
    
    if operation == "rotate":
        angle = int(args[0])
        image = image.rotate(angle, expand=True)
    elif operation == "crop":
        left, top, right, bottom = map(int, args)
        image = image.crop((left, top, right, bottom))
    elif operation == "resize":
        width, height = map(int, args)
        image = image.resize((width, height))
    elif operation == "flip":
        if args[0] == "horizontal":
            image = ImageOps.mirror(image)
        else:
            image = ImageOps.flip(image)
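    # Any other operation falls through and the image is returned unchanged.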
    
    return image
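
# Usage sketch: edit_image(img, "rotate", "90") rotates counter-clockwise by
# 90 degrees; edit_image(img, "flip", "horizontal") mirrors left-to-right.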

with gr.Blocks() as demo:
    
    with gr.Column():
        gr.Markdown(f"""
        # GenZ Couture
        Currently running on {power_device}.
        """)
        
        prompt_part1 = gr.Textbox(value="a single", label="Prompt Part 1")
        prompt_part2 = gr.Textbox(label="Color", placeholder="e.g., red, blue")
        prompt_part3 = gr.Textbox(label="Dress Type", placeholder="e.g., t-shirt, hoodie")
        prompt_part4_front = gr.Textbox(label="Front Design", placeholder="e.g., yellow stripes")
        prompt_part4_back = gr.Textbox(label="Back Design", placeholder="e.g., polka dots")
        prompt_part5 = gr.Textbox(value="hanging on the plain wall", label="Prompt Part 5")
        
        run_button = gr.Button("Generate Designs")

        front_result = gr.Image(label="Front View Result", type="pil", interactive=True)
        back_result = gr.Image(label="Back View Result", type="pil", interactive=True)

        gr.Examples(examples=examples, inputs=[prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back])

    run_button.click(
        fn=infer,
        inputs=[prompt_part1, prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back, prompt_part5],
        outputs=[front_result, back_result]
    )

    gr.Markdown("## Creative Touch")

    edit_operation = gr.Dropdown(choices=["rotate", "crop", "resize", "flip"], label="Edit Operation")
    edit_args = gr.Textbox(label="Edit Arguments (comma-separated)", placeholder="rotate: angle | crop: left,top,right,bottom | resize: width,height | flip: horizontal or vertical")
    
    edit_button = gr.Button("Edit Front Design")

    edit_button.click(
        # Split the comma-separated argument string, stripping stray whitespace
        # so entries like "0, 0, 256, 256" still parse as integers.
        fn=lambda img, operation, args: edit_image(img, operation, *[a.strip() for a in args.split(',')]),
        inputs=[front_result, edit_operation, edit_args],
        outputs=[front_result]
    )

demo.queue().launch()
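
# demo.queue() makes concurrent requests wait their turn rather than hitting
# the GPU simultaneously; passing share=True to launch() would additionally
# expose a temporary public URL.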