gaur3009 committed on
Commit 7d5f3f6 · verified · 1 Parent(s): 437efb3

Update app.py

Files changed (1):
  1. app.py +239 -169
app.py CHANGED
@@ -1,183 +1,253 @@
  import gradio as gr
  import numpy as np
  import random
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- if torch.cuda.is_available():
-     torch.cuda.max_memory_allocated(device=device)
-     pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-     pipe.enable_xformers_memory_efficient_attention()
-     pipe = pipe.to(device)
- else:
-     pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", use_safetensors=True)
-     pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
- def infer(prompt_part1, color, dress_type, design, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-     prompt = f"{prompt_part1} {color} colored plain {dress_type} with {design} design, {prompt_part5}"
-
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator
-     ).images[0]
-
-     return image
-
- examples = [
-     "red, t-shirt, yellow stripes",
-     "blue, hoodie, minimalist",
-     "red, sweat shirt, geometric design",
- ]
-
- css = """
- #col-container {
      margin: 0 auto;
-     max-width: 520px;
  }
  """

- if torch.cuda.is_available():
-     power_device = "GPU"
- else:
-     power_device = "CPU"
-
- with gr.Blocks(css=css) as demo:
-
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(f"""
-         # Text-to-Image Gradio Template
-         Currently running on {power_device}.
-         """)
-
-         with gr.Row():
-
-             prompt_part1 = gr.Textbox(
-                 value="a single",
-                 label="Prompt Part 1",
-                 show_label=False,
-                 interactive=False,
-                 container=False,
-                 elem_id="prompt_part1",
-                 visible=False,
-             )
-
-             prompt_part2 = gr.Textbox(
-                 label="color",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="color (e.g., color category)",
-                 container=False,
-             )
-
-             prompt_part3 = gr.Textbox(
-                 label="dress_type",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="dress_type (e.g., t-shirt, sweatshirt, shirt, hoodie)",
-                 container=False,
-             )
-
-             prompt_part4 = gr.Textbox(
-                 label="design",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="design",
-                 container=False,
-             )
-
-             prompt_part5 = gr.Textbox(
-                 value="hanging on the plain wall",
-                 label="Prompt Part 5",
-                 show_label=False,
-                 interactive=False,
-                 container=False,
-                 elem_id="prompt_part5",
-                 visible=False,
-             )
-
-             run_button = gr.Button("Run", scale=0)
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-
-             negative_prompt = gr.Textbox(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
              )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
              )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
              with gr.Row():
-
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=512,
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=512,
-                 )
-
-             with gr.Row():
-
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=12,
                      step=1,
-                     value=2,
                  )
-
-         gr.Examples(
-             examples=examples,
-             inputs=[prompt_part2]
-         )
-
-     run_button.click(
-         fn=infer,
-         inputs=[prompt_part1, prompt_part2, prompt_part3, prompt_part4, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-         outputs=[result]
-     )
-
- demo.queue().launch()

+ import os
+ import cv2
+ from PIL import Image
  import gradio as gr
  import numpy as np
  import random
+ import base64
+ import requests
+ import json
+ import time
+ from requests.adapters import HTTPAdapter
+
+ def tryon(person_img, garment_img, seed, randomize_seed):
+     post_start_time = time.time()
+     if person_img is None or garment_img is None:
+         return None, None, "Empty image"
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)
+     encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
+     encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
+     encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
+     encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')
+
+     url = "http://" + os.environ['tryon_url'] + "Submit"
+     token = os.environ['token']
+     cookie = os.environ['Cookie']
+     referer = os.environ['referer']
+     headers = {'Content-Type': 'application/json', 'token': token, 'Cookie': cookie, 'referer': referer}
+     data = {
+         "clothImage": encoded_garment_img,
+         "humanImage": encoded_person_img,
+         "seed": seed
+     }
+     try:
+         response = requests.post(url, headers=headers, data=json.dumps(data), timeout=50)
+         print("post response code", response.status_code)
+         if response.status_code == 200:
+             result = response.json()['result']
+             status = result['status']
+             if status == "success":
+                 uuid = result['result']
+                 print(uuid)
+     except Exception as err:
+         print(f"Error: {err}")
+         raise gr.Error("Too many users, please try again later")
+     post_end_time = time.time()
+     print(f"post time used: {post_end_time-post_start_time}")
+
+     get_start_time = time.time()
+     time.sleep(9)
+     Max_Retry = 10
+     result_img = None
+     for i in range(Max_Retry):
+         try:
+             url = "http://" + os.environ['tryon_url'] + "Query?taskId=" + uuid
+             response = requests.get(url, headers=headers, timeout=15)
+             print("get response code", response.status_code)
+             if response.status_code == 200:
+                 result = response.json()['result']
+                 status = result['status']
+                 if status == "success":
+                     result = base64.b64decode(result['result'])
+                     result_np = np.frombuffer(result, np.uint8)
+                     result_img = cv2.imdecode(result_np, cv2.IMREAD_UNCHANGED)
+                     result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
+                     info = "Success"
+                     break
+                 elif status == "error":
+                     raise gr.Error("Too many users, please try again later")
+             else:
+                 print(response.text)
+                 info = "URL error, please contact the admin"
+         except requests.exceptions.ReadTimeout:
+             print("timeout")
+             info = "Too many users, please try again later"
+         time.sleep(1)
+     get_end_time = time.time()
+     print(f"get time used: {get_end_time-get_start_time}")
+
+     return result_img, seed, info
+
+ def start_tryon(person_img, garment_img, seed, randomize_seed):
+     start_time = time.time()
+     if person_img is None or garment_img is None:
+         return None, None, "Empty image"
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
+     encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
+     encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
+     encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')
+
+     url = "http://" + os.environ['tryon_url']
+     token = os.environ['token']
+     cookie = os.environ['Cookie']
+     referer = os.environ['referer']
+
+     headers = {'Content-Type': 'application/json', 'token': token, 'Cookie': cookie, 'referer': referer}
+     data = {
+         "clothImage": encoded_garment_img,
+         "humanImage": encoded_person_img,
+         "seed": seed
+     }
+
+     result_img = None
+     try:
+         session = requests.Session()
+         response = session.post(url, headers=headers, data=json.dumps(data), timeout=60)
+         print("response code", response.status_code)
+         if response.status_code == 200:
+             result = response.json()['result']
+             status = result['status']
+             if status == "success":
+                 result = base64.b64decode(result['result'])
+                 result_np = np.frombuffer(result, np.uint8)
+                 result_img = cv2.imdecode(result_np, cv2.IMREAD_UNCHANGED)
+                 result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
+                 info = "Success"
+             else:
+                 info = "Try again later"
+         else:
+             print(response.text)
+             info = "URL error, please contact the admin"
+     except requests.exceptions.ReadTimeout:
+         print("timeout")
+         info = "Too many users, please try again later"
+         raise gr.Error("Too many users, please try again later")
+     except Exception as err:
+         print(f"Other error: {err}")
+         info = "Error, please contact the admin"
+     end_time = time.time()
+     print(f"time used: {end_time-start_time}")
+
+     return result_img, seed, info
+
+ MAX_SEED = 999999
+
+ example_path = os.path.join(os.path.dirname(__file__), 'assets')
+
+ garm_list = os.listdir(os.path.join(example_path,"cloth"))
+ garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
+
+ human_list = os.listdir(os.path.join(example_path,"human"))
+ human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
+
+ css="""
+ #col-left {
      margin: 0 auto;
+     max-width: 430px;
+ }
+ #col-mid {
+     margin: 0 auto;
+     max-width: 430px;
+ }
+ #col-right {
+     margin: 0 auto;
+     max-width: 430px;
+ }
+ #col-showcase {
+     margin: 0 auto;
+     max-width: 1100px;
+ }
+ #button {
+     color: blue;
  }
  """

+ def load_description(fp):
+     with open(fp, 'r', encoding='utf-8') as f:
+         content = f.read()
+     return content
+
+ def change_imgs(image1, image2):
+     return image1, image2
+
+ with gr.Blocks(css=css) as Tryon:
+     gr.HTML(load_description("assets/title.md"))
+     with gr.Row():
+         with gr.Column(elem_id = "col-left"):
+             gr.HTML("""
+             <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+             <div>
+             Step 1. Upload a person image ⬇️
+             </div>
+             </div>
+             """)
+         with gr.Column(elem_id = "col-mid"):
+             gr.HTML("""
+             <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+             <div>
+             Step 2. Upload a garment image ⬇️
+             </div>
+             </div>
+             """)
+         with gr.Column(elem_id = "col-right"):
+             gr.HTML("""
+             <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+             <div>
+             Step 3. Press “Run” to get try-on results
+             </div>
+             </div>
+             """)
+     with gr.Row():
+         with gr.Column(elem_id = "col-left"):
+             imgs = gr.Image(label="Person image", sources='upload', type="numpy")
+             # category = gr.Dropdown(label="Garment category", choices=['upper_body', 'lower_body', 'dresses'], value="upper_body")
+             example = gr.Examples(
+                 inputs=imgs,
+                 examples_per_page=12,
+                 examples=human_list_path
              )
+         with gr.Column(elem_id = "col-mid"):
+             garm_img = gr.Image(label="Garment image", sources='upload', type="numpy")
+             example = gr.Examples(
+                 inputs=garm_img,
+                 examples_per_page=12,
+                 examples=garm_list_path
              )
+         with gr.Column(elem_id = "col-right"):
+             image_out = gr.Image(label="Result", show_share_button=False)
              with gr.Row():
+                 seed = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
                      step=1,
+                     value=0,
                  )
+                 randomize_seed = gr.Checkbox(label="Random seed", value=True)
+             with gr.Row():
+                 seed_used = gr.Number(label="Seed used")
+                 result_info = gr.Text(label="Response")
+             # try_button = gr.Button(value="Run", elem_id="button")
+             test_button = gr.Button(value="Run", elem_id="button")
+
+
+     # try_button.click(fn=start_tryon, inputs=[imgs, garm_img, seed, randomize_seed], outputs=[image_out, seed_used, result_info], api_name='tryon',concurrency_limit=10)
+     test_button.click(fn=tryon, inputs=[imgs, garm_img, seed, randomize_seed], outputs=[image_out, seed_used, result_info], api_name='tryon',concurrency_limit=10)
+
+     with gr.Column(elem_id = "col-showcase"):
+         gr.HTML("""
+         <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+         <div> </div>
+         <br>
+         <div>
+         Virtual try-on examples in pairs of person and garment images
+         </div>
+         </div>
+         """)
+
+ # ip = requests.get('http://ifconfig.me/ip', timeout=1).text.strip()
+ # print("ip address", ip)
+ Tryon.queue(max_size = 20).launch(max_threads = 5)
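
Note: the click handler is registered with api_name='tryon', so the running Space can also be driven programmatically. The snippet below is a minimal client-side sketch, not part of this commit: the Space id is a placeholder and a recent gradio_client providing handle_file is assumed.

    # Hypothetical usage sketch for the /tryon endpoint exposed by this app.
    from gradio_client import Client, handle_file

    client = Client("gaur3009/SPACE_NAME")  # placeholder Space id, replace with the real one

    # Inputs mirror the Gradio components: person image, garment image, seed, randomize seed.
    result_path, seed_used, info = client.predict(
        handle_file("person.jpg"),
        handle_file("garment.jpg"),
        0,     # seed (ignored when randomize seed is checked)
        True,  # randomize seed
        api_name="/tryon",
    )
    print(info, seed_used, result_path)  # result_path points to the returned try-on image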