|
import gradio as gr |
|
import cv2 |
|
from PIL import Image |
|
import numpy as np |
|
import os |
|
import torch |
|
import torch.nn.functional as F |
|
from torchvision import transforms |
|
from torchvision.transforms import Compose |
|
import tempfile |
|
from functools import partial |
|
import spaces |
|
from zipfile import ZipFile |
|
from vincenty import vincenty |
|
import json |
|
from collections import Counter |
|
import mediapy |
|
|
from huggingface_hub import hf_hub_download |
|
from depth_anything_v2.dpt import DepthAnythingV2 |
|
|
|
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' |
|
model_configs = { |
|
'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, |
|
'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, |
|
'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, |
|
'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} |
|
} |
|
encoder2name = { |
|
'vits': 'Small', |
|
'vitb': 'Base', |
|
'vitl': 'Large', |
|
'vitg': 'Giant', |
|
} |
|
|
|
blurin = "1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1" |
|
edge = [] |
|
gradient = None |
|
params = { "fnum":0 } |
|
pcolors = [] |
|
frame_selected = 0 |
|
frames = [] |
|
backups = [] |
|
depths = [] |
|
masks = [] |
|
locations = [] |
|
mesh = [] |
|
mesh_n = [] |
|
scene = None |
|
|
|
def zip_files(files_in, files_out): |
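"""Pack the original frames and their depth maps into depth_result.zip and return its path."""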
|
with ZipFile("depth_result.zip", "w") as zipObj: |
|
for file in files_in:

zipObj.write(file, os.path.basename(file))

for file in files_out:

zipObj.write(file, os.path.basename(file))
|
return "depth_result.zip" |
|
|
|
def create_video(frames, fps, type): |
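"""Read the given image files and encode them into an MP4 named "<type>_result.mp4" at the requested frame rate."""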
|
print("building video result") |
|
imgs = [] |
|
for j, img in enumerate(frames): |
|
imgs.append(cv2.cvtColor(cv2.imread(img).astype(np.uint8), cv2.COLOR_BGR2RGB)) |
|
|
|
mediapy.write_video(type + "_result.mp4", imgs, fps=fps) |
|
return type + "_result.mp4" |
|
|
|
@torch.no_grad() |
|
|
|
def predict_depth(image, model): |
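"""Run DepthAnythingV2 inference on a single image and return the raw depth map."""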
|
return model.infer_image(image) |
|
|
def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data=blurin, o=1, b=32): |
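"""Run Depth Anything V2 over every frame of a video (or a directory/list of images): write a blurred original, a depth map and a segmentation mask per frame, assemble the blurred frames into a video, and zip the results. Returns the video path, the zip path, the frame list, the mask of the selected frame and the depth list. Inputs larger than 128 MB are rejected."""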
|
if encoder not in ["vitl","vitb","vits","vitg"]: |
|
encoder = "vits" |
|
|
|
model_name = encoder2name[encoder] |
|
model = DepthAnythingV2(**model_configs[encoder]) |
|
filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model") |
|
state_dict = torch.load(filepath, map_location="cpu") |
|
model.load_state_dict(state_dict) |
|
model = model.to(DEVICE).eval() |
|
|
if os.path.isfile(video_path): |
|
if video_path.endswith('txt'): |
|
with open(video_path, 'r') as f: |
|
filenames = f.read().splitlines()
|
else: |
|
filenames = [video_path] |
|
else: |
|
filenames = os.listdir(video_path) |
|
filenames = [os.path.join(video_path, filename) for filename in filenames if not filename.startswith('.')] |
|
filenames.sort() |
|
|
|
|
|
global masks
masks = []
|
|
|
for k, filename in enumerate(filenames): |
|
file_size = os.path.getsize(filename)/1024/1024 |
|
if file_size > 128.0: |
|
print(f'File size of {filename} is larger than 128 MB, sorry!')
|
return filename |
|
print('Progress {:}/{:},'.format(k+1, len(filenames)), 'Processing', filename) |
|
|
|
raw_video = cv2.VideoCapture(filename) |
|
frame_width, frame_height = int(raw_video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(raw_video.get(cv2.CAP_PROP_FRAME_HEIGHT)) |
|
frame_rate = int(raw_video.get(cv2.CAP_PROP_FPS)) |
|
if frame_rate < 1: |
|
frame_rate = 1 |
|
cframes = int(raw_video.get(cv2.CAP_PROP_FRAME_COUNT)) |
|
print(f'frames: {cframes}, fps: {frame_rate}') |
|
|
count = 0 |
|
n = 0 |
|
depth_frames = [] |
|
orig_frames = [] |
|
backup_frames = [] |
|
thumbnail_old = [] |
|
|
|
while raw_video.isOpened(): |
|
ret, raw_frame = raw_video.read() |
|
if not ret: |
|
break |
|
else: |
|
print(count) |
|
|
|
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2RGB) / 255.0 |
|
frame_pil = Image.fromarray((frame * 255).astype(np.uint8)) |
|
|
depth = predict_depth(raw_frame[:, :, ::-1], model) |
|
depth_gray = ((depth - depth.min()) / (depth.max() - depth.min()) * 255.0).astype(np.uint8) |
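# The following masking trims the normalized depth map: near-saturated values (250-255) in the top 7/8 of the frame are zeroed, and values above 192 in the bottom 1/8 are clamped to 192, before converting to 3 channels.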
|
|
mask = cv2.inRange(depth_gray[0:int(depth_gray.shape[0]/8*7)-1, 0:depth_gray.shape[1]], 250, 255) |
|
|
|
depth_gray[0:int(depth_gray.shape[0]/8*7)-1, 0:depth_gray.shape[1]][mask>0] = 0 |
|
|
|
mask = cv2.inRange(depth_gray[int(depth_gray.shape[0]/8*7):depth_gray.shape[0], 0:depth_gray.shape[1]], 192, 255) |
|
depth_gray[int(depth_gray.shape[0]/8*7):depth_gray.shape[0], 0:depth_gray.shape[1]][mask>0] = 192 |
|
|
|
depth_color = cv2.cvtColor(depth_gray, cv2.COLOR_GRAY2BGR) |
|
|
if cframes < 16: |
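# For short clips (fewer than 16 frames), skip near-duplicate frames: if most pixels of a small grayscale thumbnail are unchanged from the previous frame, the frame is dropped.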
|
thumbnail = cv2.cvtColor(cv2.resize(raw_frame, (16,32)), cv2.COLOR_BGR2GRAY).flatten() |
|
if len(thumbnail_old) > 0: |
|
diff = thumbnail - thumbnail_old |
|
|
|
c = Counter(diff) |
|
value, cc = c.most_common()[0] |
|
if value == 0 and cc > int(16*32*0.8): |
|
count += 1 |
|
continue |
|
thumbnail_old = thumbnail |
|
|
|
blur_frame = blur_image(raw_frame, depth_color, blur_data) |
|
|
|
cv2.imwrite(f"f{count}.jpg", blur_frame) |
|
orig_frames.append(f"f{count}.jpg") |
|
|
|
cv2.imwrite(f"f{count}_.jpg", blur_frame) |
|
backup_frames.append(f"f{count}_.jpg") |
|
|
|
cv2.imwrite(f"f{count}_dmap.jpg", depth_color) |
|
depth_frames.append(f"f{count}_dmap.jpg") |
|
|
|
depth_gray = seg_frame(depth_gray, o, b) + 128 |
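# seg_frame returns GrabCut labels (0, 1, 3); adding 128 shifts them into mid-gray, presumably so the tiny label values survive being written out as JPEG and can be recovered in draw_mask.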
|
|
|
|
|
cv2.imwrite(f"f{count}_mask.jpg", depth_gray) |
|
masks.append(f"f{count}_mask.jpg") |
|
count += 1 |
|
|
|
final_vid = create_video(orig_frames, frame_rate, "orig") |
|
|
|
|
|
final_zip = zip_files(orig_frames, depth_frames) |
|
raw_video.release() |
|
|
|
cv2.destroyAllWindows() |
|
|
|
global gradient |
|
global frame_selected |
|
global depths |
|
global frames |
|
global backups |
|
frames = orig_frames |
|
backups = backup_frames |
|
depths = depth_frames |
|
|
|
if depth_color.shape[0] == 2048: |
|
gradient = cv2.imread('./gradient_large.png').astype(np.uint8) |
|
elif depth_color.shape[0] == 1024: |
|
gradient = cv2.imread('./gradient.png').astype(np.uint8) |
|
else: |
|
gradient = cv2.imread('./gradient_small.png').astype(np.uint8) |
|
|
|
return final_vid, final_zip, frames, masks[frame_selected], depths |
|
|
|
def depth_edges_mask(depth): |
|
"""Returns a mask of edges in the depth map. |
|
Args: |
|
depth: 2D numpy array of shape (H, W) with dtype float32. |
|
Returns: |
|
mask: 2D numpy array of shape (H, W) with dtype bool. |
|
""" |
|
|
|
depth_dx, depth_dy = np.gradient(depth) |
|
|
|
depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2) |
|
|
|
mask = depth_grad > 0.05 |
|
return mask |
|
|
|
def pano_depth_to_world_points(depth): |
|
""" |
|
360 depth to world points |
|
given 2D depth is an equirectangular projection of a spherical image |
|
Treat depth as radius |
|
longitude : -pi to pi |
|
latitude : -pi/2 to pi/2 |
|
""" |
|
|
|
|
|
radius = (255 - depth.flatten()) |
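# The normalized depth map is brightest for the closest surfaces, so 255 - depth gives near pixels a small radius and far pixels a large one.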
|
|
|
lon = np.linspace(0, np.pi*2, depth.shape[1]) |
|
lat = np.linspace(0, np.pi, depth.shape[0]) |
|
lon, lat = np.meshgrid(lon, lat) |
|
lon = lon.flatten() |
|
lat = lat.flatten() |
|
|
|
pts3d = [[0,0,0]] |
|
uv = [[0,0]] |
|
nl = [[0,0,0]] |
|
for i in range(0, 1): |
|
for j in range(0, 1): |
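# Both loops run a single iteration (range(0, 1)), so only the unshifted i = 0, j = 0 grid is generated.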
|
|
|
|
|
d_lon = lon + i/2 * np.pi*2 / depth.shape[1] |
|
d_lat = lat + j/2 * np.pi / depth.shape[0] |
|
|
|
nx = np.cos(d_lon) * np.sin(d_lat) |
|
ny = np.cos(d_lat) |
|
nz = np.sin(d_lon) * np.sin(d_lat) |
|
|
|
|
|
x = radius * nx |
|
y = radius * ny |
|
z = radius * nz |
|
|
|
pts = np.stack([x, y, z], axis=1) |
|
uvs = np.stack([lon/np.pi/2, lat/np.pi], axis=1) |
|
nls = np.stack([-nx, -ny, -nz], axis=1) |
|
|
|
pts3d = np.concatenate((pts3d, pts), axis=0) |
|
uv = np.concatenate((uv, uvs), axis=0) |
|
nl = np.concatenate((nl, nls), axis=0) |
|
|
|
|
|
|
return [pts3d, uv, nl] |
|
|
|
def rgb2gray(rgb): |
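"""Average the three channels with equal weights (rather than the usual luminance weights)."""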
|
return np.dot(rgb[...,:3], [0.333, 0.333, 0.333]) |
|
|
|
def get_mesh(image, depth, blur_data, loadall): |
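"""Prepare mesh data for the selected frame (or all loaded frames when loadall is set): blur the frame with its depth map, project the grayscale depth to 3-D points, and return a placeholder glTF path plus the comma-separated list of loaded frame indices. The textured depth domes themselves are built client-side by the load_model JS."""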
|
global depths |
|
global pcolors |
|
global frame_selected |
|
global mesh |
|
global mesh_n |
|
global scene |
|
if not loadall:
|
mesh = [] |
|
mesh_n = [] |
|
fnum = frame_selected |
|
|
depthc = cv2.imread(depths[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8) |
|
blur_img = blur_image(cv2.imread(image[fnum][0], cv2.IMREAD_UNCHANGED).astype(np.uint8), depthc, blur_data) |
|
gdepth = cv2.cvtColor(depthc, cv2.COLOR_RGB2GRAY) |
|
|
|
print('depth to gray - ok') |
|
points = pano_depth_to_world_points(gdepth) |
|
pts3d = points[0] |
|
uv = points[1] |
|
nl = points[2] |
|
print('radius from depth - ok') |
|
|
uvs = uv.reshape(-1, 2) |
|
|
|
|
|
verts = [[0,0,0]] |
|
normals = nl.reshape(-1, 3) |
|
rgba = cv2.cvtColor(blur_img, cv2.COLOR_RGB2RGBA) |
|
colors = rgba.reshape(-1, 4) |
|
clrs = [[128,128,128,0]] |
|
|
if str(fnum) not in mesh_n:
|
mesh_n.append(str(fnum)) |
|
print('mesh - ok') |
|
|
return "./TriangleWithoutIndices.gltf", ",".join(mesh_n) |
|
|
|
def blur_image(image, depth, blur_data): |
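"""Apply a depth-dependent blur: blur_data is a string of 256 space-separated odd Gaussian kernel sizes, one per 8-bit depth level (index 0 applies to level 255); pixels at each level are blurred with the matching kernel. Pure white pixels are nudged to (254, 254, 254), presumably to keep white reserved for the selection mask."""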
|
blur_a = blur_data.split() |
|
|
|
|
|
blur_frame = image.copy() |
|
j = 0 |
|
while j < 256: |
|
i = 255 - j |
|
blur_lo = np.array([i,i,i]) |
|
blur_hi = np.array([i+1,i+1,i+1]) |
|
blur_mask = cv2.inRange(depth, blur_lo, blur_hi) |
|
|
|
|
|
blur = cv2.GaussianBlur(image, (int(blur_a[j]), int(blur_a[j])), 0) |
|
|
|
blur_frame[blur_mask>0] = blur[blur_mask>0] |
|
j = j + 1 |
|
|
|
white = cv2.inRange(blur_frame, np.array([255,255,255]), np.array([255,255,255])) |
|
blur_frame[white>0] = (254,254,254) |
|
|
|
return blur_frame |
|
|
|
def loadfile(f): |
|
return f |
|
|
|
def show_json(txt): |
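"""Extract the file paths from a Gradio JSON payload and return them in the same order as the on_submit outputs (video, zip, frames, mask background, depths, coordinates)."""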
|
data = json.loads(txt) |
|
print(txt) |
|
i=0 |
|
while i < len(data[2]): |
|
data[2][i] = data[2][i]["image"]["path"] |
|
data[4][i] = data[4][i]["path"] |
|
i=i+1 |
|
return data[0]["video"]["path"], data[1]["path"], data[2], data[3]["background"]["path"], data[4], data[5] |
|
|
|
|
|
def seg_frame(newmask, b, d): |
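"""Build a GrabCut initialization mask from a grayscale depth map: pixels close to the reference gradient image are cleared, the rest is binarized with Otsu thresholding, then an eroded core is marked definite foreground (1) and a dilated region probable foreground (3); everything else stays definite background (0). b and d are the erosion and dilation radii."""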
|
|
|
if newmask.shape[0] == 2048: |
|
gd = cv2.imread('./gradient_large.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
elif newmask.shape[0] == 1024: |
|
gd = cv2.imread('./gradient.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
else: |
|
gd = cv2.imread('./gradient_small.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
|
|
newmask[np.absolute(newmask.astype(np.int16)-gd.astype(np.int16))<16] = 0 |
|
ret,newmask = cv2.threshold(newmask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) |
|
|
|
|
|
|
|
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * b + 1, 2 * b + 1), (b, b)) |
|
bd = cv2.erode(newmask, element) |
|
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * d + 1, 2 * d + 1), (d, d)) |
|
bg = cv2.dilate(newmask, element) |
|
bg[bg.shape[0]-64:bg.shape[0],0:bg.shape[1]] = 0 |
|
|
|
mask = np.zeros(newmask.shape[:2],np.uint8) |
|
|
|
|
|
|
|
mask[bg == 255] = 3 |
|
mask[bd == 255] = 1 |
|
|
|
return mask |
|
|
|
|
|
def select_frame(d, evt: gr.SelectData): |
|
global frame_selected |
|
global depths |
|
global masks |
|
global edge |
|
|
|
if evt.index != frame_selected: |
|
edge = [] |
|
frame_selected = evt.index |
|
|
|
return depths[frame_selected], frame_selected |
|
|
|
def switch_rows(v): |
|
global frames |
|
global depths |
|
if v:
|
print(depths[0]) |
|
return depths |
|
else: |
|
print(frames[0]) |
|
return frames |
|
|
|
|
|
def bincount(a): |
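"""Return the most common color in the image, with channel order reversed relative to the input (e.g. BGR in, RGB out), via a flattened 3-D bincount."""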
|
a2D = a.reshape(-1,a.shape[-1]) |
|
col_range = (256, 256, 256) |
|
a1D = np.ravel_multi_index(a2D.T, col_range) |
|
return list(reversed(np.unravel_index(np.bincount(a1D).argmax(), col_range))) |
|
|
|
def reset_mask(d): |
|
global frame_selected |
|
global frames |
|
global backups |
|
global masks |
|
global depths |
|
global edge |
|
|
|
edge = [] |
|
backup = cv2.imread(backups[frame_selected]).astype(np.uint8) |
|
cv2.imwrite(frames[frame_selected], backup) |
|
|
|
d["layers"][0][0:d["layers"][0].shape[0], 0:d["layers"][0].shape[1]] = (0,0,0,0) |
|
|
|
return gr.ImageEditor(value=d) |
|
|
|
|
|
def draw_mask(o, b, v, d, evt: gr.EventData): |
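"""Grow a selection from the most recent click: flood-fill the stored segmentation mask around the clicked point, refine the region with GrabCut against the depth background, and paint the selected area white in the current frame."""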
|
global frames |
|
global depths |
|
global params |
|
global frame_selected |
|
global masks |
|
global gradient |
|
global edge |
|
|
|
points = json.loads(v) |
|
pts = np.array(points, np.int32) |
|
pts = pts.reshape((-1,1,2)) |
|
|
|
if len(edge) == 0 or params["fnum"] != frame_selected: |
|
if params["fnum"] != frame_selected: |
|
d["background"] = cv2.imread(depths[frame_selected]).astype(np.uint8) |
|
params["fnum"] = frame_selected |
|
|
|
bg = cv2.cvtColor(d["background"], cv2.COLOR_RGBA2GRAY) |
|
bg[bg==255] = 0 |
|
|
|
edge = bg.copy() |
|
else: |
|
bg = edge.copy() |
|
|
|
x = points[-1][0]

y = points[-1][1]
|
|
|
mask = cv2.imread(masks[frame_selected], cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
mask[mask==128] = 0 |
|
print(mask[mask>0]-128) |
|
d["layers"][0] = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGBA) |
|
|
|
sel = cv2.floodFill(mask, None, (x, y), 1, 2, 2, (4 | cv2.FLOODFILL_FIXED_RANGE))[2] |
|
|
|
sel = sel[1:sel.shape[0]-1, 1:sel.shape[1]-1] |
|
|
|
d["layers"][0][sel==0] = (0,0,0,0) |
|
|
|
|
|
mask = cv2.cvtColor(d["layers"][0], cv2.COLOR_RGBA2GRAY) |
|
mask[mask==0] = 128 |
|
print(mask[mask>128]-128) |
|
mask, bgdModel, fgdModel = cv2.grabCut(cv2.cvtColor(d["background"], cv2.COLOR_RGBA2RGB), mask-128, None,None,None,15, cv2.GC_INIT_WITH_MASK) |
|
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8') |
|
|
|
frame = cv2.imread(frames[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8) |
|
frame[mask>0] = (255,255,255) |
|
cv2.imwrite(frames[frame_selected], frame) |
|
|
|
switch_rows(False) |
|
return gr.ImageEditor(value=d) |
|
|
|
|
|
load_model=""" |
|
async(c, o, p, d, n, m)=>{ |
|
var intv = setInterval(function(){ |
|
if (document.getElementById("model3D").getElementsByTagName("canvas")[0]) { |
|
try { |
|
if (typeof BABYLON !== "undefined" && BABYLON.Engine && BABYLON.Engine.LastCreatedScene) { |
|
BABYLON.Engine.LastCreatedScene.onAfterRenderObservable.add(function() { //onDataLoadedObservable |
|
|
|
var then = new Date().getTime(); |
|
var now, delta; |
|
const interval = 1000 / 25; |
|
const tolerance = 0.1; |
|
BABYLON.Engine.LastCreatedScene.getEngine().stopRenderLoop(); |
|
BABYLON.Engine.LastCreatedScene.getEngine().runRenderLoop(function () { |
|
now = new Date().getTime(); |
|
delta = now - then; |
|
then = now - (delta % interval); |
|
if (delta >= interval - tolerance) { |
|
BABYLON.Engine.LastCreatedScene.render(); |
|
} |
|
}); |
|
|
|
BABYLON.Engine.LastCreatedScene.getEngine().setHardwareScalingLevel(1.0); |
|
BABYLON.Engine.LastCreatedScene.clearColor = new BABYLON.Color4(255,255,255,255); |
|
BABYLON.Engine.LastCreatedScene.ambientColor = new BABYLON.Color4(255,255,255,255); |
|
//BABYLON.Engine.LastCreatedScene.autoClear = false; |
|
//BABYLON.Engine.LastCreatedScene.autoClearDepthAndStencil = false; |
|
/*for (var i=0; i<BABYLON.Engine.LastCreatedScene.getNodes().length; i++) { |
|
if (BABYLON.Engine.LastCreatedScene.getNodes()[i].material) { |
|
BABYLON.Engine.LastCreatedScene.getNodes()[i].material.pointSize = Math.ceil(Math.log2(Math.PI/document.getElementById("zoom").value)); |
|
} |
|
}*/ |
|
BABYLON.Engine.LastCreatedScene.getAnimationRatio(); |
|
//BABYLON.Engine.LastCreatedScene.activeCamera.inertia = 0.0; |
|
}); |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata = { |
|
pipeline: new BABYLON.DefaultRenderingPipeline("default", true, BABYLON.Engine.LastCreatedScene, [BABYLON.Engine.LastCreatedScene.activeCamera]) |
|
} |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.samples = 4; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = 1.0; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = 1.0; |
|
|
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = document.getElementById("zoom").value; |
|
|
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].style.filter = "blur(" + Math.ceil(Math.log2(Math.PI/document.getElementById("zoom").value))/2.0*Math.sqrt(2.0) + "px)"; |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].oncontextmenu = function(e){e.preventDefault();} |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].ondrag = function(e){e.preventDefault();} |
|
|
|
if (o.indexOf(""+n) < 0) { |
|
if (o != "") { o += ","; } |
|
o += n; |
|
} |
|
//alert(o); |
|
var o_ = o.split(","); |
|
var q = BABYLON.Engine.LastCreatedScene.meshes; |
|
for(i = 0; i < q.length; i++) { |
|
let mesh = q[i]; |
|
mesh.dispose(false, true); |
|
} |
|
var dome = []; |
|
for (var j=0; j<o_.length; j++) { |
|
o_[j] = parseInt(o_[j]); |
|
dome[j] = new BABYLON.PhotoDome("dome"+j, p[o_[j]].image.url, |
|
{ |
|
resolution: 16, |
|
size: 512 |
|
}, BABYLON.Engine.LastCreatedScene); |
|
var q = BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-2]._children; |
|
for(i = 0; i < q.length; i++) { |
|
let mesh = q[i]; |
|
mesh.dispose(false, true); |
|
} |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].name = "dome"+j; |
|
//BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.needDepthPrePass = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].scaling.z = -1; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].alphaIndex = o_.length-j; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.diffuseTexture.hasAlpha = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.useAlphaFromDiffuseTexture = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].applyDisplacementMap(m[o_[j]].url, 0, 255, function(m){try{alert(BABYLON.Engine.Version);}catch(e){alert(e);}}, null, null, true, function(e){alert(e);}); |
|
} |
|
if (document.getElementById("model")) { |
|
document.getElementById("model").appendChild(document.getElementById("model3D")); |
|
toggleDisplay("model"); |
|
} |
|
clearInterval(intv); |
|
} |
|
} catch(e) {alert(e);} |
|
} |
|
}, 40); |
|
} |
|
""" |
|
|
|
js = """ |
|
async()=>{ |
|
console.log('Hi'); |
|
|
|
const chart = document.getElementById('chart'); |
|
const blur_in = document.getElementById('blur_in').getElementsByTagName('textarea')[0]; |
|
var md = false; |
|
var xold = 128; |
|
var yold = 32; |
|
var a = new Array(256); |
|
var l; |
|
|
|
for (var i=0; i<256; i++) { |
|
const hr = document.createElement('hr'); |
|
hr.style.backgroundColor = 'hsl(0,0%,' + (100-i/256*100) + '%)'; |
|
chart.appendChild(hr); |
|
} |
|
|
|
function resetLine() { |
|
a.fill(1); |
|
for (var i=0; i<256; i++) { |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = '32px'; |
|
} |
|
} |
|
resetLine(); |
|
window.resetLine = resetLine; |
|
|
|
function pointerDown(x, y) { |
|
md = true; |
|
xold = parseInt(x - chart.getBoundingClientRect().x); |
|
yold = parseInt(y - chart.getBoundingClientRect().y); |
|
chart.title = xold + ',' + yold; |
|
} |
|
window.pointerDown = pointerDown; |
|
|
|
function pointerUp() { |
|
md = false; |
|
var evt = document.createEvent('Event'); |
|
evt.initEvent('input', true, false); |
|
blur_in.dispatchEvent(evt); |
|
chart.title = ''; |
|
} |
|
window.pointerUp = pointerUp; |
|
|
|
function lerp(y1, y2, mu) { return y1*(1-mu)+y2*mu; } |
|
|
|
function drawLine(x, y) { |
|
x = parseInt(x - chart.getBoundingClientRect().x); |
|
y = parseInt(y - chart.getBoundingClientRect().y); |
|
if (md === true && y >= 0 && y < 64 && x >= 0 && x < 256) { |
|
if (y < 32) { |
|
a[x] = Math.abs(32-y)*2 + 1; |
|
chart.childNodes[x].style.height = a[x] + 'px'; |
|
chart.childNodes[x].style.marginTop = y + 'px'; |
|
|
|
for (var i=Math.min(xold, x)+1; i<Math.max(xold, x); i++) { |
|
l = parseInt(lerp( yold, y, (i-xold)/(x-xold) )); |
|
|
|
if (l < 32) { |
|
a[i] = Math.abs(32-l)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = l + 'px'; |
|
} else if (l < 64) { |
|
a[i] = Math.abs(l-32)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = (64-l) + 'px'; |
|
} |
|
} |
|
} else if (y < 64) { |
|
a[x] = Math.abs(y-32)*2 + 1; |
|
chart.childNodes[x].style.height = a[x] + 'px'; |
|
chart.childNodes[x].style.marginTop = (64-y) + 'px'; |
|
|
|
for (var i=Math.min(xold, x)+1; i<Math.max(xold, x); i++) { |
|
l = parseInt(lerp( yold, y, (i-xold)/(x-xold) )); |
|
|
|
if (l < 32) { |
|
a[i] = Math.abs(32-l)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = l + 'px'; |
|
} else if (l < 64) { |
|
a[i] = Math.abs(l-32)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = (64-l) + 'px'; |
|
} |
|
} |
|
} |
|
blur_in.value = a.join(' '); |
|
xold = x; |
|
yold = y; |
|
chart.title = xold + ',' + yold; |
|
} |
|
} |
|
window.drawLine = drawLine; |
|
|
|
|
|
var intv_ = setInterval(function(){ |
|
if (document.getElementById("image_edit") && document.getElementById("image_edit").getElementsByTagName("canvas")) { |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].oncontextmenu = function(e){e.preventDefault();} |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].ondrag = function(e){e.preventDefault();} |
|
|
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onclick = function(e) { |
|
var x = parseInt((e.clientX-e.target.getBoundingClientRect().x)*e.target.width/e.target.getBoundingClientRect().width); |
|
var y = parseInt((e.clientY-e.target.getBoundingClientRect().y)*e.target.height/e.target.getBoundingClientRect().height); |
|
|
|
var p = document.getElementById("mouse").getElementsByTagName("textarea")[0].value.slice(1, -1); |
|
if (p != "") { p += ", "; } |
|
p += "[" + x + ", " + y + "]"; |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[" + p + "]"; |
|
|
|
var evt = document.createEvent("Event"); |
|
evt.initEvent("input", true, false); |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].dispatchEvent(evt); |
|
} |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onfocus = function(e) { |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[]"; |
|
} |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onblur = function(e) { |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[]"; |
|
} |
|
clearInterval(intv_); |
|
} |
|
}, 40); |
|
|
|
} |
|
""" |
|
|
|
css = """ |
|
#img-display-container { |
|
max-height: 100vh; |
|
} |
|
#img-display-input { |
|
max-height: 80vh; |
|
} |
|
#img-display-output { |
|
max-height: 80vh; |
|
} |
|
""" |
|
|
|
head = """ |
|
""" |
|
|
|
title = "# Depth Anything V2 Video" |
|
description = """**Depth Anything V2** on full video files, intended for Google Street View panorama slideshows. |
|
Please refer to the [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), and [GitHub repo](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""
|
|
with gr.Blocks(css=css, js=js, head=head) as demo: |
|
gr.Markdown(title) |
|
gr.Markdown(description) |
|
gr.Markdown("### Video Depth Prediction demo") |
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Group(): |
|
input_json = gr.Textbox(elem_id="json_in", value="{}", label="JSON", interactive=False) |
|
input_url = gr.Textbox(elem_id="url_in", value="./examples/streetview.mp4", label="URL") |
|
input_video = gr.Video(label="Input Video", format="mp4") |
|
input_url.input(fn=loadfile, inputs=[input_url], outputs=[input_video]) |
|
submit = gr.Button("Submit") |
|
with gr.Group(): |
|
output_frame = gr.Gallery(label="Frames", preview=True, columns=8192, interactive=False) |
|
output_switch = gr.Checkbox(label="Show depths") |
|
output_switch.input(fn=switch_rows, inputs=[output_switch], outputs=[output_frame]) |
|
selected = gr.Number(label="Selected frame", visible=False, elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False) |
|
with gr.Accordion(label="Depths", open=False): |
|
output_depth = gr.Files(label="Depth files", interactive=False) |
|
with gr.Group(): |
|
output_mask = gr.ImageEditor(layers=False, sources=('clipboard'), show_download_button=True, type="numpy", interactive=True, transforms=(None,), eraser=gr.Eraser(), brush=gr.Brush(default_size=0, colors=['black', '#505050', '#a0a0a0', 'white']), elem_id="image_edit") |
|
with gr.Accordion(label="Border", open=False): |
|
boffset = gr.Slider(label="Inner", value=1, maximum=256, minimum=0, step=1) |
|
bsize = gr.Slider(label="Outer", value=32, maximum=256, minimum=0, step=1) |
|
mouse = gr.Textbox(label="Mouse x,y", elem_id="mouse", value="""[]""", interactive=False) |
|
reset = gr.Button("Reset", size='sm') |
|
mouse.input(fn=draw_mask, show_progress="minimal", inputs=[boffset, bsize, mouse, output_mask], outputs=[output_mask]) |
|
reset.click(fn=reset_mask, inputs=[output_mask], outputs=[output_mask]) |
|
|
|
with gr.Column(): |
|
model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl"), ("giant", "vitg")], type="value", value="vits", label='Model Type') |
|
processed_video = gr.Video(label="Output Video", format="mp4", interactive=False) |
|
processed_zip = gr.File(label="Output Archive", interactive=False) |
|
result = gr.Model3D(label="3D Mesh", clear_color=[0.5, 0.5, 0.5, 0.0], camera_position=[0, 90, 0], zoom_speed=2.0, pan_speed=2.0, interactive=True, elem_id="model3D") |
|
with gr.Tab("Blur"): |
|
chart_c = gr.HTML(elem_id="chart_c", value="""<div id='chart' onpointermove='window.drawLine(event.clientX, event.clientY);' onpointerdown='window.pointerDown(event.clientX, event.clientY);' onpointerup='window.pointerUp();' onpointerleave='window.pointerUp();' onpointercancel='window.pointerUp();' onclick='window.resetLine();'></div> |
|
<style> |
|
* { |
|
user-select: none; |
|
} |
|
#chart hr { |
|
width: 1px; |
|
height: 1px; |
|
clear: none; |
|
border: 0; |
|
padding:0; |
|
display: inline-block; |
|
position: relative; |
|
vertical-align: top; |
|
margin-top:32px; |
|
} |
|
#chart { |
|
padding:0; |
|
margin:0; |
|
width:256px; |
|
height:64px; |
|
background-color:#808080; |
|
touch-action: none; |
|
} |
|
</style> |
|
""") |
|
average = gr.HTML(value="""<label for='average'>Average</label><input id='average' type='range' style='width:256px;height:1em;' value='1' min='1' max='15' step='2' onclick=' |
|
var pts_a = document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].value.split(\" \"); |
|
for (var i=0; i<256; i++) { |
|
var avg = 0; |
|
var div = this.value; |
|
for (var j = i-parseInt(this.value/2); j <= i+parseInt(this.value/2); j++) { |
|
if (pts_a[j]) { |
|
avg += parseInt(pts_a[j]); |
|
} else if (div > 1) { |
|
div--; |
|
} |
|
} |
|
pts_a[i] = Math.round((avg / div - 1) / 2) * 2 + 1; |
|
|
|
document.getElementById(\"chart\").childNodes[i].style.height = pts_a[i] + \"px\"; |
|
document.getElementById(\"chart\").childNodes[i].style.marginTop = (64-pts_a[i])/2 + \"px\"; |
|
} |
|
document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].value = pts_a.join(\" \"); |
|
|
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"input\", true, false); |
|
document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].dispatchEvent(evt); |
|
' oninput=' |
|
this.parentNode.childNodes[2].innerText = this.value; |
|
' onchange='this.click();'/><span>1</span>""") |
|
with gr.Accordion(label="Levels", open=False): |
|
blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=blurin) |
|
with gr.Group(): |
|
with gr.Accordion(label="Locations", open=False): |
|
output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected]) |
|
example_coords = """[ |
|
{"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997}, |
|
{"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064}, |
|
{"lat": 50.07377647505558, "lng": 14.437161000659017, "heading": 151.41025, "pitch": 3.4802200000000028}, |
|
{"lat": 50.07379496839027, "lng": 14.437148958238538, "heading": 151.93391, "pitch": 2.843050000000005}, |
|
{"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998} |
|
]""" |
|
coords = gr.Textbox(elem_id="coords", value=example_coords, label="Coordinates", interactive=False) |
|
mesh_order = gr.Textbox(elem_id="order", value="", label="Order", interactive=False) |
|
load_all = gr.Checkbox(label="Load all") |
|
|
|
with gr.Group(): |
|
camera = gr.HTML(value="""<a href='#' id='reset_cam' style='float:right;color:white' onclick=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata = { |
|
screenshot: true, |
|
pipeline: new BABYLON.DefaultRenderingPipeline(\"default\", true, BABYLON.Engine.LastCreatedScene, [BABYLON.Engine.LastCreatedScene.activeCamera]) |
|
} |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.radius = 0; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.samples = 4; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = document.getElementById(\"zoom\").value; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = document.getElementById(\"contrast\").value; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = document.getElementById(\"exposure\").value; |
|
|
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].style.filter = \"blur(\" + Math.ceil(Math.log2(Math.PI/document.getElementById(\"zoom\").value))/2.0*Math.sqrt(2.0) + \"px)\"; |
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].oncontextmenu = function(e){e.preventDefault();} |
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].ondrag = function(e){e.preventDefault();} |
|
'>Reset camera</a>""") |
|
html = gr.HTML(value="""<label for='zoom' style='width:5em'>Zoom</label><input id='zoom' type='range' style='width:256px;height:1em;' value='0.8' min='0.157' max='1.57' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = this.value; |
|
this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.fov; |
|
|
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].style.filter = \"blur(\" + BABYLON.Engine.LastCreatedScene.getNodes()[parseInt(document.getElementById(\"fnum\").getElementsByTagName(\"input\")[0].value)+1].material.pointSize/2.0*Math.sqrt(2.0) + \"px)\"; |
|
'/><span>0.8</span>""") |
|
contrast = gr.HTML(value="""<label for='contrast' style='width:5em'>Contrast</label><input id='contrast' type='range' style='width:256px;height:1em;' value='1.0' min='0' max='2' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = this.value; |
|
this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast; |
|
'/><span>1.0</span>""") |
|
exposure = gr.HTML(value="""<label for='exposure' style='width:5em'>Exposure</label><input id='exposure' type='range' style='width:256px;height:1em;' value='1.0' min='0' max='2' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = this.value; |
|
this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure; |
|
'/><span>1.0</span>""") |
|
render = gr.Button("Render") |
|
input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords]) |
|
|
|
def on_submit(uploaded_video,model_type,blur_in,boffset,bsize,coordinates): |
|
global locations |
|
locations = [] |
|
avg = [0, 0] |
|
|
|
locations = json.loads(coordinates) |
|
for k, location in enumerate(locations): |
|
if "tiles" in locations[k]: |
|
locations[k]["heading"] = locations[k]["tiles"]["originHeading"] |
|
locations[k]["pitch"] = locations[k]["tiles"]["originPitch"] |
|
else: |
|
locations[k]["heading"] = 0 |
|
locations[k]["pitch"] = 0 |
|
|
|
if "location" in locations[k]: |
|
locations[k] = locations[k]["location"]["latLng"] |
|
avg[0] = avg[0] + locations[k]["lat"] |
|
avg[1] = avg[1] + locations[k]["lng"] |
|
else: |
|
locations[k]["lat"] = 0 |
|
locations[k]["lng"] = 0 |
|
|
|
if len(locations) > 0: |
|
avg[0] = avg[0] / len(locations) |
|
avg[1] = avg[1] / len(locations) |
|
|
|
for k, location in enumerate(locations): |
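# Convert each panorama's offset from the centroid into approximate metres with vincenty (which returns kilometres, hence * 1000), then rescale to signed scene units; the / 2.5 * 111 factor appears to be an app-specific scale.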
|
lat = vincenty((location["lat"], 0), (avg[0], 0)) * 1000 |
|
lng = vincenty((0, location["lng"]), (0, avg[1])) * 1000 |
|
locations[k]["lat"] = float(lat / 2.5 * 111 * np.sign(location["lat"]-avg[0])) |
|
locations[k]["lng"] = float(lng / 2.5 * 111 * np.sign(location["lng"]-avg[1])) |
|
print(locations) |
|
|
outputs = make_video(uploaded_video, encoder=model_type, blur_data=blur_in, o=boffset, b=bsize)

return outputs + (json.dumps(locations),)
|
|
|
submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords]) |
|
render.click(None, inputs=[coords, mesh_order, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model) |
|
render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, mesh_order]) |
|
|
|
example_files = [["./examples/streetview.mp4", "vits", blurin, 1, 32, example_coords]] |
|
examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords]) |
|
|
|
|
|
if __name__ == '__main__': |
|
demo.queue().launch() |