vibs08 committed
Commit a4ac8a1 · verified · 1 Parent(s): 2be9aff

Update app.py

Files changed (1)
  1. app.py +143 -70
app.py CHANGED
@@ -214,93 +214,166 @@ app = FastAPI()
 
  # return preprocessed, mesh_name_obj, mesh_name_glb
 
  from gradio_client import Client
  import requests
- import json
 
- client = Client("vibs08/flash-sd3-new", hf_token=os.getenv("token"))
 
  url = 'https://vibs08-image-3d-fastapi.hf.space/process_image/'
 
-
  def text2img(prompt):
-     result = client.predict(
-         prompt=prompt,
-         seed=0,
-         randomize_seed=False,
-         guidance_scale=1,
-         num_inference_steps=4,
-         negative_prompt="deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW, bad text",
-         api_name="/infer"
-     )
-     return result
-
-
- def three_d(prompt, seed, fr, mc, auth, text=None):
-
-     file_path = text2img(prompt)
-     payload = {
-         'seed': seed,
-         'enhance_image': False,
-         'do_remove_background': True,
-         'foreground_ratio': fr,
-         'mc_resolution': mc,
-         'auth': auth,
-         'text_prompt': text
-     }
-
-     files = {
-         'file': (file_path, open(file_path, 'rb'), 'image/png')
-     }
-
-     headers = {
-         'accept': 'application/json'
-     }
-
-     response = requests.post(url, headers=headers, files=files, data=payload)
-
-     return response.json()
  @app.post("/process_text/")
- async def process_image(
      text_prompt: str = Form(...),
      seed: int = Form(...),
      foreground_ratio: float = Form(...),
      mc_resolution: int = Form(...),
      auth: str = Form(...)
  ):
-
      if auth == os.getenv("AUTHORIZE"):
          return three_d(text_prompt, seed, foreground_ratio, mc_resolution, auth)
-
-     # else:
-     #     return {"ERROR": "Too Many Requests!"}
-
-     # preprocessed, mesh_name_obj, mesh_name_glb = run_example(text_prompt, seed, do_remove_background, foreground_ratio, mc_resolution)
-     # # preprocessed = preprocess(image_pil, do_remove_background, foreground_ratio)
-     # # mesh_name_obj, mesh_name_glb = generate(preprocessed, mc_resolution)
-     # timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
-     # object_name = f'object_{timestamp}_1.obj'
-     # object_name_2 = f'object_{timestamp}_2.glb'
-     # object_name_3 = f"object_{timestamp}.png"
-     # preprocessed_image_tempfile = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
-     # preprocessed.save(preprocessed_image_tempfile.name)
-     # upload_file_to_s3(preprocessed_image_tempfile.name, 'framebucket3d', object_name_3)
-
-
-     # if upload_file_to_s3(mesh_name_obj, 'framebucket3d', object_name) and upload_file_to_s3(mesh_name_glb, 'framebucket3d', object_name_2):
-
-     #     return {
-     #         "img_path": f"https://framebucket3d.s3.amazonaws.com/{object_name_3}",
-     #         "obj_path": f"https://framebucket3d.s3.amazonaws.com/{object_name}",
-     #         "glb_path": f"https://framebucket3d.s3.amazonaws.com/{object_name_2}"
-     #     }
-
-     # else:
-     #     return {"Internal Server Error": False}
      else:
-         return {"Authentication": "Failed"}
 
  if __name__ == "__main__":
      import uvicorn
-     uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
  # return preprocessed, mesh_name_obj, mesh_name_glb
 
+ # from gradio_client import Client
+ # import requests
+ # import json
+
+ # client = Client("vibs08/flash-sd3-new", hf_token=os.getenv("token"))
+
+ # url = 'https://vibs08-image-3d-fastapi.hf.space/process_image/'
+
+
+ # def text2img(promptt):
+ #     result = client.predict(
+ #         prompt=promptt,
+ #         seed=0,
+ #         randomize_seed=False,
+ #         guidance_scale=1,
+ #         num_inference_steps=4,
+ #         negative_prompt="deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW, bad text",
+ #         api_name="/infer"
+ #     )
+ #     return result
+
+
+ # def three_d(prompt, seed, fr, mc, auth, text=None):
+
+ #     file_path = text2img(prompt)
+ #     payload = {
+ #         'seed': seed,
+ #         'enhance_image': False,
+ #         'do_remove_background': True,
+ #         'foreground_ratio': fr,
+ #         'mc_resolution': mc,
+ #         'auth': auth,
+ #         'text_prompt': text
+ #     }
+
+ #     files = {
+ #         'file': (file_path, open(file_path, 'rb'), 'image/png')
+ #     }
+
+ #     headers = {
+ #         'accept': 'application/json'
+ #     }
+
+ #     response = requests.post(url, headers=headers, files=files, data=payload)
+
+ #     return response.json()
+ # @app.post("/process_text/")
+ # async def process_image(
+ #     text_prompt: str = Form(...),
+ #     seed: int = Form(...),
+ #     foreground_ratio: float = Form(...),
+ #     mc_resolution: int = Form(...),
+ #     auth: str = Form(...)
+ # ):
+
+ #     if auth == os.getenv("AUTHORIZE"):
+ #         return three_d(text_prompt, seed, foreground_ratio, mc_resolution, auth)
+
+ #     # else:
+ #     #     return {"ERROR": "Too Many Requests!"}
+
+ #     # preprocessed, mesh_name_obj, mesh_name_glb = run_example(text_prompt, seed, do_remove_background, foreground_ratio, mc_resolution)
+ #     # # preprocessed = preprocess(image_pil, do_remove_background, foreground_ratio)
+ #     # # mesh_name_obj, mesh_name_glb = generate(preprocessed, mc_resolution)
+ #     # timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
+ #     # object_name = f'object_{timestamp}_1.obj'
+ #     # object_name_2 = f'object_{timestamp}_2.glb'
+ #     # object_name_3 = f"object_{timestamp}.png"
+ #     # preprocessed_image_tempfile = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
+ #     # preprocessed.save(preprocessed_image_tempfile.name)
+ #     # upload_file_to_s3(preprocessed_image_tempfile.name, 'framebucket3d', object_name_3)
+
+
+ #     # if upload_file_to_s3(mesh_name_obj, 'framebucket3d', object_name) and upload_file_to_s3(mesh_name_glb, 'framebucket3d', object_name_2):
+
+ #     #     return {
+ #     #         "img_path": f"https://framebucket3d.s3.amazonaws.com/{object_name_3}",
+ #     #         "obj_path": f"https://framebucket3d.s3.amazonaws.com/{object_name}",
+ #     #         "glb_path": f"https://framebucket3d.s3.amazonaws.com/{object_name_2}"
+ #     #     }
+
+ #     # else:
+ #     #     return {"Internal Server Error": False}
+ #     else:
+ #         return {"Authentication": "Failed"}
+
+ # if __name__ == "__main__":
+ #     import uvicorn
+ #     uvicorn.run(app, host="0.0.0.0", port=7860)
+
+
  from gradio_client import Client
  import requests
+ import os
 
+ # Initialize Gradio client with Hugging Face token
+ client = Client("vibs08/flash-sd3-new", hf_token=os.getenv("token"))
 
+ # URL for processing image via FastAPI
  url = 'https://vibs08-image-3d-fastapi.hf.space/process_image/'
 
  def text2img(prompt):
+     # Use the Gradio client to generate an image from text
+     result = client.predict(
+         inputs=prompt,  # Adjust the argument name based on the actual method signature
+         seed=0,
+         randomize_seed=False,
+         guidance_scale=1,
+         num_inference_steps=4,
+         negative_prompt="deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW, bad text",
+         api_name="/infer"
+     )
+
+     # Assuming result is a file path or image data
+     return result
+
+ def three_d(prompt, seed, fr, mc, auth, text=None):
+     file_path = text2img(prompt)  # Get the file path of the generated image
+
+     payload = {
+         'seed': seed,
+         'enhance_image': False,
+         'do_remove_background': True,
+         'foreground_ratio': fr,
+         'mc_resolution': mc,
+         'auth': auth,
+         'text_prompt': text
+     }
+
+     with open(file_path, 'rb') as image_file:
+         files = {
+             'file': (file_path, image_file, 'image/png')
+         }
+
+         headers = {
+             'accept': 'application/json'
+         }
+
+         response = requests.post(url, headers=headers, files=files, data=payload)
+
+     return response.json()
+
+ from fastapi import FastAPI, Form
+
+ app = FastAPI()
+
  @app.post("/process_text/")
+ async def process_text(
      text_prompt: str = Form(...),
      seed: int = Form(...),
      foreground_ratio: float = Form(...),
      mc_resolution: int = Form(...),
      auth: str = Form(...)
  ):
      if auth == os.getenv("AUTHORIZE"):
          return three_d(text_prompt, seed, foreground_ratio, mc_resolution, auth)
      else:
+         return {"Authentication": "Failed"}
 
  if __name__ == "__main__":
      import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
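
For reference, a minimal sketch of how a client might call the new /process_text/ route added in this commit. The base URL below is a placeholder (the diff only shows the app binding to 0.0.0.0:7860), and the auth value is hypothetical; it must match whatever the AUTHORIZE environment variable is set to on the server. The form fields mirror the Form(...) parameters of process_text.

import requests

# Placeholder base URL: substitute the host where this FastAPI app is actually served.
BASE_URL = "http://localhost:7860"

# Form fields mirror the parameters declared on the /process_text/ endpoint.
data = {
    "text_prompt": "a small wooden chair",
    "seed": 0,
    "foreground_ratio": 0.85,  # example value
    "mc_resolution": 256,      # example marching-cubes resolution
    "auth": "REPLACE_WITH_AUTHORIZE_SECRET",  # must equal os.getenv("AUTHORIZE") on the server
}

response = requests.post(f"{BASE_URL}/process_text/", data=data)
print(response.json())

On success the server generates an image from the prompt, forwards it to the /process_image/ Space, and returns that service's JSON response; if the auth value does not match, it returns {"Authentication": "Failed"}.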