Update app.py
app.py CHANGED
@@ -39,7 +39,6 @@ blurin = "1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
 edge = []
 gradient = None
 params = { "fnum":0 }
-dcolor = []
 pcolors = []
 frame_selected = 0
 frames = []
@@ -459,7 +458,6 @@ def seg_frame(newmask, b, d):


 def select_frame(d, evt: gr.SelectData):
-    global dcolor
     global frame_selected
     global depths
     global masks
@@ -468,13 +466,8 @@ def select_frame(d, evt: gr.SelectData):
     if evt.index != frame_selected:
         edge = []
         frame_selected = evt.index
-
-        if len(dcolor) == 0:
-            bg = [127, 127, 127, 255]
-        else:
-            bg = "[" + str(dcolor[frame_selected])[1:-1] + ", 255]"

-    return depths[frame_selected], frame_selected
+    return depths[frame_selected], frame_selected

 def switch_rows(v):
     global frames
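Aside: `select_frame` is wired further down as a gallery select handler, and `evt.index` carries the position of the clicked thumbnail. A minimal sketch of the same pattern, using placeholder component names and file paths rather than the app's own:

import gradio as gr

frames = ["frame0.png", "frame1.png", "frame2.png"]  # placeholder frame paths

def on_select(evt: gr.SelectData):
    # evt.index is the position of the clicked gallery item
    return f"selected frame: {evt.index}"

with gr.Blocks() as demo:
    gallery = gr.Gallery(value=frames, label="Frames")
    picked = gr.Textbox(label="Selection")
    # .select() injects a gr.SelectData describing the click into the handler
    gallery.select(fn=on_select, inputs=None, outputs=picked)

demo.launch()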
@@ -486,61 +479,6 @@ def switch_rows(v):
     print(frames[0])
     return frames

-def optimize(v, d):
-    global pcolors
-    global dcolor
-    global frame_selected
-    global frames
-    global depths
-
-    if v == True:
-        ddepth = cv2.CV_16S
-        kernel_size = 3
-        l = 16
-
-        dcolor = []
-        for k, f in enumerate(frames):
-            frame = cv2.imread(frames[k]).astype(np.uint8)
-
-            # convert to np.float32
-            f = np.float32(frame.reshape((-1,3)))
-            # define criteria, number of clusters(K) and apply kmeans()
-            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 4, 1.0)
-            ret,label,center=cv2.kmeans(f,l,None,criteria,4,cv2.KMEANS_RANDOM_CENTERS)
-            # Now convert back into uint8, and make original image
-            center = np.uint8(center)
-            res = center[label.flatten()]
-            frame = res.reshape((frame.shape))
-
-            depth = cv2.imread(depths[k]).astype(np.uint8)
-            mask = cv2.cvtColor(depth, cv2.COLOR_RGB2GRAY)
-            dcolor.append(bincount(frame[mask==0]))
-            print(dcolor[k])
-            clrs = Image.fromarray(frame.astype(np.uint8)).convert('RGB').getcolors()
-            i=0
-            while i<len(clrs):
-                clrs[i] = list(clrs[i][1])
-                clrs[i].append(255)
-                i=i+1
-            print(clrs)
-            pcolors = clrs
-
-            #mask = cv2.convertScaleAbs(cv2.Laplacian(cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY), ddepth, ksize=kernel_size))
-            #mask[mask>0] = 255
-            #frame[mask==0] = (0, 0, 0)
-            cv2.imwrite(frames[k], frame)
-
-            #depth[mask==0] = (255,255,255)
-            mask = cv2.inRange(frame, np.array([dcolor[k][0]-8, dcolor[k][1]-8, dcolor[k][2]-8]), np.array([dcolor[k][0]+8, dcolor[k][1]+8, dcolor[k][2]+8]))
-            depth[mask>0] = (255,255,255)
-            depth[depth.shape[0]-1:depth.shape[0], 0:depth.shape[1]] = (160, 160, 160)
-            depth[0:1, 0:depth.shape[1]] = (0, 0, 0)
-            cv2.imwrite(depths[k], depth)
-
-    if d == False:
-        return frames, "[" + str(dcolor[frame_selected])[1:-1] + ", 255]"
-    else:
-        return depths, "[" + str(dcolor[frame_selected])[1:-1] + ", 255]"

 def bincount(a):
     a2D = a.reshape(-1,a.shape[-1])
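Aside: the deleted `optimize` helper chained k-means color quantization, a dominant-color lookup (the `bincount` helper kept below), and a `cv2.inRange` mask around that color. A condensed single-image sketch of the same pipeline, assuming a BGR uint8 frame and a hypothetical file path:

import cv2
import numpy as np

frame = cv2.imread("frame.png")  # hypothetical path; BGR, uint8

# 1. k-means color quantization to 16 clusters
pixels = np.float32(frame.reshape(-1, 3))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 4, 1.0)
_, labels, centers = cv2.kmeans(pixels, 16, None, criteria, 4, cv2.KMEANS_RANDOM_CENTERS)
quantized = np.uint8(centers)[labels.flatten()].reshape(frame.shape)

# 2. dominant color via a bincount over packed color triples
flat = quantized.reshape(-1, 3)
packed = np.ravel_multi_index(flat.T, (256, 256, 256))
dominant = np.unravel_index(np.bincount(packed).argmax(), (256, 256, 256))

# 3. mask every pixel within +/-8 of the dominant color
lower = np.array([max(int(c) - 8, 0) for c in dominant])
upper = np.array([min(int(c) + 8, 255) for c in dominant])
mask = cv2.inRange(quantized, lower, upper)

The removed code ran this per extracted frame and painted the matching region of the depth map white before writing both images back to disk.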
@@ -560,7 +498,7 @@ def reset_mask(d):
     backup = cv2.imread(backups[frame_selected]).astype(np.uint8)
     cv2.imwrite(frames[frame_selected], backup)

-    d["layers"][0] =
+    d["layers"][0] = None

     return gr.ImageEditor(value=d)

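Aside: with `type="numpy"`, a gr.ImageEditor value travels as a dict holding "background", "layers" and "composite" entries, so `reset_mask` can drop the drawn strokes by nulling the first layer and handing the dict back. A minimal sketch of that pattern, with placeholder component names:

import gradio as gr

def clear_strokes(d):
    # d: {"background": ..., "layers": [...], "composite": ...}
    if d is not None and d.get("layers"):
        d["layers"][0] = None  # discard the drawn layer, keep the background
    return gr.ImageEditor(value=d)

with gr.Blocks() as demo:
    editor = gr.ImageEditor(type="numpy", layers=False)
    reset = gr.Button("Reset mask")
    reset.click(fn=clear_strokes, inputs=[editor], outputs=[editor])

demo.launch()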
@@ -645,7 +583,7 @@ def findNormals(format):


 load_model="""
-async(c, o, b, p, d, n, m)=>{
+async(c, o, p, d, n, m)=>{
 var intv = setInterval(function(){
 if (document.getElementById("iframe3D")===null || typeof document.getElementById("iframe3D")==="undefined") {
 try {
@@ -666,12 +604,8 @@ async(c, o, b, p, d, n, m)=>{
 }
 });

-var bg = JSON.parse(document.getElementById("bgcolor").getElementsByTagName("textarea")[0].value);
 BABYLON.Engine.LastCreatedScene.getEngine().setHardwareScalingLevel(1.0);
-
-bg[i] /= 255;
-}
-BABYLON.Engine.LastCreatedScene.clearColor = new BABYLON.Color4(bg[0], bg[1], bg[2], bg[3]);
+BABYLON.Engine.LastCreatedScene.clearColor = new BABYLON.Color4(255,255,255,255);
 BABYLON.Engine.LastCreatedScene.ambientColor = new BABYLON.Color4(255,255,255,255);
 //BABYLON.Engine.LastCreatedScene.autoClear = false;
 //BABYLON.Engine.LastCreatedScene.autoClearDepthAndStencil = false;
@@ -748,7 +682,6 @@ async(c, o, b, p, d, n, m)=>{
 var o_ = o.split(",");
 document.getElementById("iframe3D").contentDocument.getElementById("coords").value = c;
 document.getElementById("iframe3D").contentDocument.getElementById("order").value = o;
-document.getElementById("iframe3D").contentDocument.getElementById("bgcolor").value = b;
 document.getElementById("iframe3D").contentDocument.getElementById("bgimage").value = "";
 document.getElementById("iframe3D").contentDocument.getElementById("bgdepth").value = "";
 for (var j=0; j<o_.length; j++) {
@@ -918,9 +851,6 @@ with gr.Blocks(css=css, js=js) as demo:
     with gr.Accordion(label="Depths", open=False):
         output_depth = gr.Files(label="Depth files", interactive=False)
     output_switch.input(fn=switch_rows, inputs=[output_switch], outputs=[output_frame])
-    optimize_switch = gr.Checkbox(label="Optimize")
-    bgcolor = gr.Textbox(elem_id="bgcolor", value="[127, 127, 127, 255]", label="Background color", interactive=False)
-    optimize_switch.input(fn=optimize, inputs=[optimize_switch, output_switch], outputs=[output_frame, bgcolor])
     output_mask = gr.ImageEditor(layers=False, sources=('upload', 'clipboard'), show_download_button=True, type="numpy", interactive=True, transforms=(None,), eraser=gr.Eraser(), brush=gr.Brush(default_size=0, colors=['black', '#505050', '#a0a0a0', 'white']), elem_id="image_edit")
     with gr.Row():
         selector = gr.HTML(value="""
@@ -1034,7 +964,7 @@ with gr.Blocks(css=css, js=js) as demo:
     blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=blurin)
     with gr.Accordion(label="Locations", open=False):
         selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
-        output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected
+        output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])
     example_coords = """[
 {"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997},
 {"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064},
@@ -1174,7 +1104,7 @@ with gr.Blocks(css=css, js=js) as demo:
         return output_video_path + (json.dumps(locations),)

     submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
-    render.click(None, inputs=[coords, mesh_order,
+    render.click(None, inputs=[coords, mesh_order, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model)
     render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, result_file, mesh_order])

     example_files = [["./examples/streetview.mp4", "vits", blurin, 1, 32, example_coords]]
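Aside: `render.click` is registered twice, once with `fn=None` plus `js=load_model` so the JavaScript string runs entirely in the browser with the listed components' values as its arguments, and once with a regular Python callback. A stripped-down sketch of that dual wiring, with placeholder names in place of the app's components:

import gradio as gr

# client-side handler: receives the input components' values as JS arguments
js_log = """
(text) => { console.log("render clicked with:", text); }
"""

def build(text):
    # server-side handler registered on the same button
    return f"built: {text}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    render_btn = gr.Button("Render")
    # one click, two listeners: the JS one runs in the browser, the Python one on the server
    render_btn.click(None, inputs=[prompt], outputs=None, js=js_log)
    render_btn.click(build, inputs=[prompt], outputs=[result])

demo.launch()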