rishh76 committed
Commit 50ab3df · verified · 1 parent: b5eb0cf

Update app.py

Files changed (1)
  1. app.py +37 -157
app.py CHANGED
@@ -11,162 +11,45 @@ import json
 import time


-
-# Add a new function for text-to-image generation
-def generate_garment_image(prompt):
-    # This is a placeholder function. You'll need to implement actual text-to-image generation here.
-    # For example, you might use a service like DALL-E, Stable Diffusion, or any other text-to-image model.
-    # For now, we'll just return a placeholder image.
-    placeholder_image = np.zeros((256, 256, 3), dtype=np.uint8)
-    cv2.putText(placeholder_image, prompt, (10, 128), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
-    return placeholder_image
-
 def tryon(person_img, garment_prompt, seed, randomize_seed):
     post_start_time = time.time()
-    if person_img is None or garment_prompt == "":
+    if person_img is None or garment_prompt.strip() == "":
         return None, None, "Empty image or prompt"
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
-
-    # Generate garment image from prompt
-    garment_img = generate_garment_image(garment_prompt)
-
+
     encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
     encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
-    encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
-    encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')
-
-    url = "http://" + os.environ['tryon_url'] + "Submit"
-    token = os.environ['token']
-    cookie = os.environ['Cookie']
-    referer = os.environ['referer']
-    headers = {'Content-Type': 'application/json', 'token': token, 'Cookie': cookie, 'referer': referer}
-    data = {
-        "clothImage": encoded_garment_img,
-        "humanImage": encoded_person_img,
-        "seed": seed
-    }
-    try:
-        response = requests.post(url, headers=headers, data=json.dumps(data), timeout=50)
-        print("post response code", response.status_code)
-        if response.status_code == 200:
-            result = response.json()['result']
-            status = result['status']
-            if status == "success":
-                uuid = result['result']
-                print(uuid)
-    except Exception as err:
-        print(f"Error: {err}")
-        raise gr.Error("Too many users, please try again later")
-    post_end_time = time.time()
-    print(f"post time used: {post_end_time-post_start_time}")
-
-    get_start_time = time.time()
-    time.sleep(9)
-    Max_Retry = 10
-    result_img = None
-    for i in range(Max_Retry):
-        try:
-            url = "http://" + os.environ['tryon_url'] + "Query?taskId=" + uuid
-            response = requests.get(url, headers=headers, timeout=15)
-            print("get response code", response.status_code)
-            if response.status_code == 200:
-                result = response.json()['result']
-                status = result['status']
-                if status == "success":
-                    result = base64.b64decode(result['result'])
-                    result_np = np.frombuffer(result, np.uint8)
-                    result_img = cv2.imdecode(result_np, cv2.IMREAD_UNCHANGED)
-                    result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
-                    info = "Success"
-                    break
-                elif status == "error":
-                    raise gr.Error("Too many users, please try again later")
-            else:
-                print(response.text)
-                info = "URL error, please contact the admin"
-        except requests.exceptions.ReadTimeout:
-            print("timeout")
-            info = "Too many users, please try again later"
-        except Exception as err:
-            print(f"Error: {err}")
-        time.sleep(1)
-    get_end_time = time.time()
-    print(f"get time used: {get_end_time-get_start_time}")
-
-    return result_img, seed, info
-
-
-def start_tryon(person_img, garment_prompt, seed, randomize_seed):
-    start_time = time.time()
-    if person_img is None or garment_prompt == "":
-        return None, None, "Empty image or prompt"
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)

-    # Generate garment image from prompt
-    garment_img = generate_garment_image(garment_prompt)
+    # Simulate generation of garment image from text prompt
+    generated_garment_img = np.zeros((person_img.shape[0], person_img.shape[1], 3), dtype=np.uint8)  # Dummy garment image (black)

-    encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
-    encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
-    encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
+    # Encode the generated garment image
+    encoded_garment_img = cv2.imencode('.jpg', generated_garment_img)[1].tobytes()
     encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')

-    url = "http://" + os.environ['tryon_url']
-    token = os.environ['token']
-    cookie = os.environ['Cookie']
-    referer = os.environ['referer']
+    # Simulate request processing (no external API calls)
+    time.sleep(1)
+
+    # Decoding process (dummy)
+    result_img = cv2.imdecode(np.frombuffer(base64.b64decode(encoded_garment_img), np.uint8), cv2.IMREAD_UNCHANGED)
+    result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)

-    headers = {'Content-Type': 'application/json', 'token': token, 'Cookie': cookie, 'referer': referer}
-    data = {
-        "clothImage": encoded_garment_img,
-        "humanImage": encoded_person_img,
-        "seed": seed
-    }
+    post_end_time = time.time()
+    print(f"post time used: {post_end_time - post_start_time}")

-    result_img = None
-    try:
-        session = requests.Session()
-        response = session.post(url, headers=headers, data=json.dumps(data), timeout=60)
-        print("response code", response.status_code)
-        if response.status_code == 200:
-            result = response.json()['result']
-            status = result['status']
-            if status == "success":
-                result = base64.b64decode(result['result'])
-                result_np = np.frombuffer(result, np.uint8)
-                result_img = cv2.imdecode(result_np, cv2.IMREAD_UNCHANGED)
-                result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
-                info = "Success"
-            else:
-                info = "Try again later"
-        else:
-            print(response.text)
-            info = "URL error, please contact the admin"
-    except requests.exceptions.ReadTimeout:
-        print("timeout")
-        info = "Too many users, please try again later"
-        raise gr.Error("Too many users, please try again later")
-    except Exception as err:
-        print(f"Other error: {err}")
-        info = "Error, please contact the admin"
-    end_time = time.time()
-    print(f"time used: {end_time-start_time}")
+    # Return the simulated result image, used seed, and success message
+    return result_img, seed, "Success"

-    return result_img, seed, info

 MAX_SEED = 999999

-
-
-
 example_path = os.path.join(os.path.dirname(__file__), 'assets')

-human_list = os.listdir(os.path.join(example_path,"human"))
-human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
+human_list = os.listdir(os.path.join(example_path, "human"))
+human_list_path = [os.path.join(example_path, "human", human) for human in human_list]

-css="""
+css = """
 #col-left {
     margin: 0 auto;
     max-width: 430px;
@@ -193,10 +76,11 @@ def load_description(fp):
         content = f.read()
     return content

+
 with gr.Blocks(css=css) as Tryon:
     gr.HTML(load_description("assets/title.md"))
     with gr.Row():
-        with gr.Column(elem_id = "col-left"):
+        with gr.Column(elem_id="col-left"):
             gr.HTML("""
             <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
             <div>
@@ -204,37 +88,33 @@ with gr.Blocks(css=css) as Tryon:
             </div>
             </div>
             """)
-        with gr.Column(elem_id = "col-mid"):
+        with gr.Column(elem_id="col-mid"):
             gr.HTML("""
             <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
             <div>
-            Step 2. Enter a garment description ⬇️
+            Step 2. Enter a text prompt for the garment ⬇️
             </div>
             </div>
             """)
-        with gr.Column(elem_id = "col-right"):
+        with gr.Column(elem_id="col-right"):
             gr.HTML("""
             <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
             <div>
-            Step 3. Press "Run" to get try-on results
+            Step 3. Press Run to get try-on results
             </div>
             </div>
             """)
     with gr.Row():
-        with gr.Column(elem_id = "col-left"):
+        with gr.Column(elem_id="col-left"):
             imgs = gr.Image(label="Person image", sources='upload', type="numpy")
             example = gr.Examples(
                 inputs=imgs,
                 examples_per_page=12,
                 examples=human_list_path
             )
-        with gr.Column(elem_id = "col-mid"):
-            garm_prompt = gr.Textbox(label="Garment description", placeholder="Enter a description of the garment...")
-            example_prompts = gr.Examples(
-                inputs=garm_prompt,
-                examples=["A red t-shirt", "Blue jeans", "A floral summer dress", "A black leather jacket"]
-            )
-        with gr.Column(elem_id = "col-right"):
+        with gr.Column(elem_id="col-mid"):
+            garment_prompt = gr.Textbox(label="Garment text prompt", placeholder="Describe the garment...")
+        with gr.Column(elem_id="col-right"):
             image_out = gr.Image(label="Result", show_share_button=False)
             with gr.Row():
                 seed = gr.Slider(
@@ -250,26 +130,26 @@ with gr.Blocks(css=css) as Tryon:
                 result_info = gr.Text(label="Response")
             test_button = gr.Button(value="Run", elem_id="button")

-    test_button.click(fn=tryon, inputs=[imgs, garm_prompt, seed, randomize_seed], outputs=[image_out, seed_used, result_info], api_name='tryon', concurrency_limit=40)
+    test_button.click(fn=tryon, inputs=[imgs, garment_prompt, seed, randomize_seed], outputs=[image_out, seed_used, result_info], concurrency_limit=40)

-    with gr.Column(elem_id = "col-showcase"):
+    with gr.Column(elem_id="col-showcase"):
         gr.HTML("""
         <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
         <div> </div>
         <br>
         <div>
-        Virtual try-on examples in pairs of person images and garment descriptions
+        Virtual try-on examples in pairs of person and garment images
         </div>
         </div>
         """)
         show_case = gr.Examples(
             examples=[
-                ["assets/examples/model2.png", "A blue t-shirt", "assets/examples/result2.png"],
-                ["assets/examples/model3.png", "A red dress", "assets/examples/result3.png"],
-                ["assets/examples/model1.png", "A black suit", "assets/examples/result1.png"],
+                ["assets/examples/model2.png", "assets/examples/garment2.png", "assets/examples/result2.png"],
+                ["assets/examples/model3.png", "assets/examples/garment3.png", "assets/examples/result3.png"],
+                ["assets/examples/model1.png", "assets/examples/garment1.png", "assets/examples/result1.png"],
             ],
-            inputs=[imgs, garm_prompt, image_out],
+            inputs=[imgs, garment_prompt, image_out],
             label=None
         )

-Tryon.launch()
+Tryon.launch()
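
Note: the removed generate_garment_image placeholder mentioned DALL-E or Stable Diffusion as options for real text-to-image generation, while the new tryon stub fills generated_garment_img with a black dummy array. A minimal sketch of what that swap could look like, assuming the diffusers StableDiffusionPipeline API, a GPU, and an illustrative runwayml/stable-diffusion-v1-5 checkpoint (none of which are part of this commit):

# Illustrative sketch only, not part of this commit. StableDiffusionPipeline is
# real diffusers API; the checkpoint name and CUDA placement are assumptions.
import numpy as np
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

def generate_garment_image(prompt: str) -> np.ndarray:
    # Returns an RGB uint8 array, the same layout tryon() receives for person_img
    image = pipe(prompt, num_inference_steps=25).images[0]  # PIL image
    return np.array(image)

With such a helper in place, the np.zeros dummy inside tryon could be replaced by generate_garment_image(garment_prompt) without changing the JPEG encoding code that follows it.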