gaur3009 committed
Commit a8602f1 · verified · 1 Parent(s): c951bbe

Update app.py

Files changed (1):
  1. app.py +47 -40
app.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 import random
 from diffusers import DiffusionPipeline
 import torch
-from PIL import Image, ImageOps
+from PIL import Image, ImageOps, ImageEnhance
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -19,7 +19,7 @@ else:
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
+def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_part5, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
     front_prompt = f"front view of {prompt_part1} {color} colored plain {dress_type} with {front_design} design, {prompt_part5}"
     back_prompt = f"back view of {prompt_part1} {color} colored plain {dress_type} with {back_design} design, {prompt_part5}"
 
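Note (reviewer sketch, not part of the commit): infer still receives seed and randomize_seed, but the lines that consume them (26-29) fall between this hunk and the next. The conventional pattern for that elided block would look something like the following; the actual file may differ:

    # Assumed shape of the elided lines 26-29.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)  # random is already imported
    generator = torch.Generator(device=device).manual_seed(seed)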
@@ -30,7 +30,6 @@ def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_par
 
     front_image = pipe(
         prompt=front_prompt,
-        negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
@@ -40,7 +39,6 @@ def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_par
 
     back_image = pipe(
         prompt=back_prompt,
-        negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
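Note (reviewer sketch, not part of the commit): both pipeline calls now drop negative_prompt entirely rather than passing an empty default. If prompt suppression is still wanted without a UI field, a hard-coded value could be passed instead. DEFAULT_NEGATIVE is an assumed name, and negative_prompt is only accepted by pipelines that support it (e.g. Stable Diffusion variants):

    DEFAULT_NEGATIVE = "blurry, low quality, deformed"  # assumed default, tune as needed

    front_image = pipe(
        prompt=front_prompt,
        negative_prompt=DEFAULT_NEGATIVE,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
    ).images[0]  # Stable Diffusion pipelines return .images, a list of PIL images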
@@ -50,17 +48,6 @@ def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_par
 
     return front_image, back_image
 
-examples = [
-    ["red", "t-shirt", "yellow stripes", "polka dots"],
-    ["blue", "hoodie", "minimalist", "abstract art"],
-    ["red", "sweat shirt", "geometric design", "plain"],
-]
-
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
-
 def edit_image(img_data, operation, *args):
     image = Image.open(img_data)
 
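Note (reviewer sketch, not part of the commit): edit_image opens its input with Image.open, but both result components are declared with type="pil", so Gradio hands the handler a PIL.Image.Image rather than a path or file object, and Image.open(img_data) would raise. A small guard covers both cases:

    from PIL import Image

    def load_image(img_data):
        # Hypothetical helper: accept either a decoded PIL image or a path/file.
        if isinstance(img_data, Image.Image):
            return img_data           # already decoded by Gradio (type="pil")
        return Image.open(img_data)   # path or file-like object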
@@ -78,48 +65,68 @@ def edit_image(img_data, operation, *args):
             image = ImageOps.mirror(image)
         else:
             image = ImageOps.flip(image)
+    elif operation == "color":
+        factor = float(args[0])
+        image = ImageEnhance.Color(image).enhance(factor)
 
     return image
 
+examples = [
+    ["red", "t-shirt", "yellow stripes", "polka dots"],
+    ["blue", "hoodie", "minimalist", "abstract art"],
+    ["red", "sweat shirt", "geometric design", "plain"],
+]
+
+if torch.cuda.is_available():
+    power_device = "GPU"
+else:
+    power_device = "CPU"
+
 with gr.Blocks() as demo:
-
-    with gr.Column():
+    with gr.Row():
         gr.Markdown(f"""
         # GenZ Couture
         Currently running on {power_device}.
         """)
-
-        prompt_part1 = gr.Textbox(value="a single", label="Prompt Part 1", visible=False)
-        prompt_part2 = gr.Textbox(label="color", placeholder="color (e.g., red, blue)")
-        prompt_part3 = gr.Textbox(label="dress_type", placeholder="dress_type (e.g., t-shirt, hoodie)")
-        prompt_part4_front = gr.Textbox(label="front design", placeholder="front design")
-        prompt_part4_back = gr.Textbox(label="back design", placeholder="back design")
-        prompt_part5 = gr.Textbox(value="hanging on the plain wall", label="Prompt Part 5", visible=False)
-
-        run_button = gr.Button("Generate Designs")
 
-        front_result = gr.Image(label="Front View Result", type="pil", interactive=True)
-        back_result = gr.Image(label="Back View Result", type="pil", interactive=True)
-
-        gr.Examples(examples=examples, inputs=[prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back])
+    with gr.Row():
+        with gr.Column():
+            prompt_part2 = gr.Textbox(label="Color", placeholder="Color (e.g., red, blue)")
+            prompt_part3 = gr.Textbox(label="Dress Type", placeholder="Dress Type (e.g., t-shirt, hoodie)")
+            prompt_part4_front = gr.Textbox(label="Front Design", placeholder="Front Design")
+            prompt_part4_back = gr.Textbox(label="Back Design", placeholder="Back Design")
+            seed = gr.Slider(0, MAX_SEED, step=1, label="Seed", value=42)
+            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+            width = gr.Slider(256, MAX_IMAGE_SIZE, step=32, label="Width", value=512)
+            height = gr.Slider(256, MAX_IMAGE_SIZE, step=32, label="Height", value=512)
+            guidance_scale = gr.Slider(1, 20, step=0.5, label="Guidance Scale", value=7.5)
+            num_inference_steps = gr.Slider(10, 100, step=1, label="Number of Inference Steps", value=50)
+
+            run_button = gr.Button("Generate Designs")
+
+        with gr.Column():
+            front_result = gr.Image(label="Front View Result", type="pil", interactive=True)
+            back_result = gr.Image(label="Back View Result", type="pil", interactive=True)
 
     run_button.click(
         fn=infer,
-        inputs=[prompt_part1, prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back, prompt_part5],
+        inputs=["a single", prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back, "hanging on the plain wall", seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs=[front_result, back_result]
     )
 
     gr.Markdown("## Creative Touch")
 
-    edit_operation = gr.Dropdown(choices=["rotate", "crop", "resize", "flip"], label="Edit Operation")
-    edit_args = gr.Textbox(label="Edit Arguments (comma-separated)", placeholder="For rotate: angle, For crop: left,top,right,bottom, For resize: width,height, For flip: horizontal/vertical")
-
-    edit_button = gr.Button("Edit Front Design")
+    with gr.Row():
+        edit_operation = gr.Dropdown(choices=["rotate", "crop", "resize", "flip", "color"], label="Edit Operation")
+        edit_args = gr.Textbox(label="Edit Arguments (comma-separated)", placeholder="For rotate: angle, For crop: left,top,right,bottom, For resize: width,height, For flip: horizontal/vertical, For color: factor")
 
-    edit_button.click(
-        fn=lambda img_data, operation, args: edit_image(img_data, operation, *args.split(',')),
-        inputs=[front_result, edit_operation, edit_args],
-        outputs=[front_result]
-    )
+    edit_button = gr.Button("Edit Front Design")
+    edited_image = gr.Image(label="Edited Front Design", type="pil", interactive=True)
+
+    edit_button.click(
+        fn=lambda img_data, operation, args: edit_image(img_data, operation, *args.split(',')),
+        inputs=[front_result, edit_operation, edit_args],
+        outputs=[edited_image]
+    )
 
 demo.queue().launch()
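Note (reviewer sketch, not part of the commit): Gradio event inputs must be components, so the raw strings "a single" and "hanging on the plain wall" in the new inputs list would fail when the click handler is registered. The previous revision's approach, hidden Textbox components holding the fixed prompt parts, still works with the new signature:

    prompt_part1 = gr.Textbox(value="a single", visible=False)
    prompt_part5 = gr.Textbox(value="hanging on the plain wall", visible=False)

    run_button.click(
        fn=infer,
        inputs=[prompt_part1, prompt_part2, prompt_part3, prompt_part4_front,
                prompt_part4_back, prompt_part5, seed, randomize_seed,
                width, height, guidance_scale, num_inference_steps],
        outputs=[front_result, back_result]
    )

Relatedly, the edit lambda forwards raw strings from args.split(','); the visible color branch coerces with float(args[0]), and the rotate/crop/resize branches (outside this hunk) need the same coercion if they do not already do it.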
 