Spaces: Running on Zero

Commit e64952a · Parent: 5d472b1
longer gpu

Files changed:
- app.py (+3, -3)
- local_app.py (+4, -28)
app.py CHANGED
@@ -384,7 +384,7 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
 def turn_buttons_on():
     return gr.update(visible=True), gr.update(visible=True)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=59)
 @torch.inference_mode()
 def process_image(
     image,
@@ -433,11 +433,11 @@ def process_image(
         generator=generator,
         image=control_image,
     ).images[0]
-    torch.cuda.synchronize()
-    torch.cuda.empty_cache()
     print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
     print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
     results.save("temp_image.jpg")
+    torch.cuda.synchronize()
+    torch.cuda.empty_cache()
     return results
 
 if prod:
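The substantive app.py change is the `@spaces.GPU` decorator on `process_image`: on a ZeroGPU Space, this decorator from the `spaces` package borrows a GPU for at most `duration` seconds per call, and this commit sets that budget to 59 seconds (the commit message "longer gpu" refers to this value; the previous duration is truncated in the diff view). A minimal sketch of the decorator stack, assuming the standard `spaces` package on a ZeroGPU Space; the function body is a placeholder, not the Space's actual pipeline code:

import spaces
import torch

@spaces.GPU(duration=59)      # ZeroGPU grants the GPU for up to ~59 s per call
@torch.inference_mode()       # run without autograd bookkeeping
def process_image(image):
    # placeholder body: the real function runs the diffusion pipeline on `image`
    # and returns the generated PIL image
    ...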
local_app.py CHANGED
@@ -431,38 +431,14 @@ def process_image(
         generator=generator,
         image=control_image,
     ).images[0]
-    torch.cuda.synchronize()
-    torch.cuda.empty_cache()
     print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
     print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
-
-    # timestamp = int(time.time())
-    #if not os.path.exists("./outputs"):
-    #    os.makedirs("./outputs")
-    # img_path = f"./{timestamp}.jpg"
-    # results_path = f"./{timestamp}_out_{prompt}.jpg"
-    # imageio.imsave(img_path, image)
-    # results.save(results_path)
     results.save("temp_image.jpg")
-
-
-
-    # path_in_repo=img_path,
-    # repo_id="broyang/anime-ai-outputs",
-    # repo_type="dataset",
-    # token=API_KEY,
-    # run_as_future=True,
-    # )
-    # api.upload_file(
-    #     path_or_fileobj=results_path,
-    #     path_in_repo=results_path,
-    #     repo_id="broyang/anime-ai-outputs",
-    #     repo_type="dataset",
-    #     token=API_KEY,
-    #     run_as_future=True,
-    # )
-
+    torch.cuda.synchronize()
+    torch.cuda.empty_cache()
+    results.save("temp_image.jpg")
     return results
+
 if prod:
     demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
 else:
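In both files the two CUDA calls survive; only their position changes. The generated image is written to disk first, and the worker then synchronizes and releases PyTorch's cached GPU memory before returning. A minimal sketch of that ordering, with a hypothetical helper name and arguments standing in for the surrounding function:

import torch

def generate_and_cleanup(pipe, **pipe_kwargs):
    # run the diffusion pipeline; `pipe` is assumed to return a diffusers-style
    # output object with an `.images` list
    result = pipe(**pipe_kwargs).images[0]
    result.save("temp_image.jpg")        # persist the output first
    if torch.cuda.is_available():
        torch.cuda.synchronize()         # wait for any queued GPU work to finish
        torch.cuda.empty_cache()         # return cached, unused blocks to the driver
    return result

Note that torch.cuda.empty_cache() only releases memory the caching allocator is no longer using; tensors that are still referenced stay allocated.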