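"""Gradio demo for "Colorful Diffuse Intrinsic Image Decomposition in the Wild".

Decomposes an input photograph into albedo, diffuse shading, and a diffuse image
using the compphoto/Intrinsic pipeline. Written for Hugging Face Spaces with
ZeroGPU ("Running on Zero"): models are loaded lazily and cached, and a GPU is
only attached while an image is being processed.
"""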
import spaces
import gradio as gr
import numpy as np
import torch
from chrislib.general import uninvert, invert, view, view_scale
from intrinsic.pipeline import load_models, run_pipeline
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Instead of loading models at startup, keep a cache and load each version lazily.
model_cache = {}


def get_model(model_version):
    """Load the requested model version on first use and cache it."""
    if model_version not in model_cache:
        model_cache[model_version] = load_models(model_version, device=DEVICE)
    return model_cache[model_version]


def generate_pipeline(models):
    """Wrap the loaded models in a callable that forwards extra kwargs to run_pipeline."""
    def pipeline_func(image, **kwargs):
        return run_pipeline(models, image, **kwargs)
    return pipeline_func
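

# On ZeroGPU Spaces, the `spaces.GPU` decorator attaches a GPU only for the
# duration of the decorated call, so model loading and inference happen here.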
@spaces.GPU
def process_image(image, model_version):
    """Decompose `image` and return [albedo, diffuse shading, diffuse image]."""
    # Nothing to do until an image is provided.
    if image is None:
        return [None, None, None]
    print(f"Processing with model version: {model_version}")
    print(f"Input shape: {image.shape}")
    image = image.astype(np.single) / 255.
    # Get or load the selected model and build the pipeline function.
    models = get_model(model_version)
    pipeline_func = generate_pipeline(models)
    result = pipeline_func(image, device=DEVICE, resize_conf=1024)
    # Returned in the order of the output gallery: albedo, diffuse shading, diffuse image.
    return [view(result['hr_alb']), 1 - invert(result['dif_shd']), view_scale(result['pos_res'])]
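

# Gradio UI: an input image, the three decomposition outputs, and a model-version selector.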
with gr.Blocks(
    css="""
    #download {
        height: 118px;
    }
    .slider .inner {
        width: 5px;
        background: #FFF;
    }
    .viewport {
        aspect-ratio: 4/3;
    }
    .tabs button.selected {
        font-size: 20px !important;
        color: crimson !important;
    }
    h1, h2, h3 {
        text-align: center;
        display: block;
    }
    .md_feedback li {
        margin-bottom: 0px !important;
    }
    .image-gallery {
        display: flex;
        flex-wrap: wrap;
        gap: 10px;
        justify-content: center;
    }
    .image-gallery > * {
        flex: 1;
        min-width: 200px;
    }
    """,
) as demo:
    gr.Markdown(
        """
        # Colorful Diffuse Intrinsic Image Decomposition in the Wild
        <p align="center">
        <a title="Website" href="https://yaksoy.github.io/ColorfulShading/" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
        <img src="https://www.obukhov.ai/img/badges/badge-website.svg">
        </a>
        <a title="Github" href="https://github.com/compphoto/Intrinsic" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
        <img src="https://img.shields.io/github/stars/compphoto/Intrinsic?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
        </a>
        </p>
        """
    )
    # Model version selector with an information panel.
    with gr.Row():
        model_version = gr.Dropdown(
            choices=["v2", "v2.1"],
            value="v2",
            label="Model Version",
            info="Select which model weights to use",
            scale=1,
        )
        gr.Markdown("""
            The model may take a few seconds to load the first time it is used;
            subsequent decompositions are faster once it is cached.
        """)
    # Gallery-style layout for all images.
    with gr.Row(elem_classes="image-gallery"):
        input_img = gr.Image(label="Input Image")
        alb_img = gr.Image(label="Albedo")
        shd_img = gr.Image(label="Diffuse Shading")
        dif_img = gr.Image(label="Diffuse Image")
    # Re-run the decomposition whenever the input image or the selected model version changes.
    input_img.change(
        process_image,
        inputs=[input_img, model_version],
        outputs=[alb_img, shd_img, dif_img],
    )
    model_version.change(
        process_image,
        inputs=[input_img, model_version],
        outputs=[alb_img, shd_img, dif_img],
    )

demo.launch(show_error=True)