|
import gradio as gr |
|
import cv2 |
|
from PIL import Image |
|
import numpy as np |
|
import os |
|
import torch |
|
import torch.nn.functional as F |
|
from torchvision import transforms |
|
from torchvision.transforms import Compose |
|
import tempfile |
|
from functools import partial |
|
import spaces |
|
from zipfile import ZipFile |
|
from vincenty import vincenty |
|
import json |
|
from collections import Counter |
|
import mediapy

|
from huggingface_hub import hf_hub_download |
|
from depth_anything_v2.dpt import DepthAnythingV2 |
|
|
|
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' |
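
# per-encoder DPT decoder settings, matching the official Depth Anything V2 checkpoints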
|
model_configs = { |
|
'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, |
|
'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, |
|
'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, |
|
'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} |
|
} |
|
encoder2name = { |
|
'vits': 'Small', |
|
'vitb': 'Base', |
|
'vitl': 'Large', |
|
'vitg': 'Giant', |
|
} |
|
|
|
blurin = "1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1" |
|
edge = [] |
|
gradient = None |
|
params = { "fnum":0 } |
|
pcolors = [] |
|
frame_selected = 0 |
|
frames = [] |
|
backups = [] |
|
depths = [] |
|
masks = [] |
|
locations = [] |
|
mesh = [] |
|
mesh_n = [] |
|
scene = None |
|
|
|
def zip_files(files_in, files_out): |
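    """Zip the given input and output files into depth_result.zip, flattened to basenames."""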
|
with ZipFile("depth_result.zip", "w") as zipObj: |
|
        for file in files_in:
            zipObj.write(file, os.path.basename(file))
        for file in files_out:
            zipObj.write(file, os.path.basename(file))
|
return "depth_result.zip" |
|
|
|
def create_video(frames, fps, type): |
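    """Re-encode a list of image file paths into <type>_result.mp4 at the given fps."""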
|
print("building video result") |
|
imgs = [] |
|
    for img in frames:
        imgs.append(cv2.cvtColor(cv2.imread(img).astype(np.uint8), cv2.COLOR_BGR2RGB))
|
|
|
mediapy.write_video(type + "_result.mp4", imgs, fps=fps) |
|
return type + "_result.mp4" |
|
|
|
@torch.no_grad() |
|
|
|
def predict_depth(image, model): |
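    """Single-frame depth inference; infer_image handles resizing and normalization internally."""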
|
    return model.infer_image(image)

|
def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data=blurin, o=1, b=32): |
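    """Run Depth Anything V2 over every frame of a video (or a directory / list of videos).

    blur_data is a space-separated list of 256 Gaussian kernel sizes, one per depth
    level (see blur_image); o and b are the erode/dilate radii passed to seg_frame.
    Returns the blurred video, a zip of frames and depth maps, the frame gallery,
    the selected mask, the depth files, and the depth video.
    """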
|
if encoder not in ["vitl","vitb","vits","vitg"]: |
|
encoder = "vits" |
|
|
|
model_name = encoder2name[encoder] |
|
model = DepthAnythingV2(**model_configs[encoder]) |
|
filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model") |
|
state_dict = torch.load(filepath, map_location="cpu") |
|
model.load_state_dict(state_dict) |
|
    model = model.to(DEVICE).eval()

|
if os.path.isfile(video_path): |
|
        if video_path.endswith('.txt'):
            with open(video_path, 'r') as f:
                filenames = f.read().splitlines()
|
else: |
|
filenames = [video_path] |
|
else: |
|
filenames = os.listdir(video_path) |
|
filenames = [os.path.join(video_path, filename) for filename in filenames if not filename.startswith('.')] |
|
filenames.sort() |
|
|
|
|
|
global masks |
|
|
|
for k, filename in enumerate(filenames): |
|
file_size = os.path.getsize(filename)/1024/1024 |
|
if file_size > 128.0: |
|
            print(f'File size of {filename} is larger than 128 MB, sorry!')
|
return filename |
|
print('Progress {:}/{:},'.format(k+1, len(filenames)), 'Processing', filename) |
|
|
|
raw_video = cv2.VideoCapture(filename) |
|
frame_width, frame_height = int(raw_video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(raw_video.get(cv2.CAP_PROP_FRAME_HEIGHT)) |
|
frame_rate = int(raw_video.get(cv2.CAP_PROP_FPS)) |
|
if frame_rate < 1: |
|
frame_rate = 1 |
|
cframes = int(raw_video.get(cv2.CAP_PROP_FRAME_COUNT)) |
|
        print(f'frames: {cframes}, fps: {frame_rate}')

|
count = 0 |
|
n = 0 |
|
depth_frames = [] |
|
orig_frames = [] |
|
backup_frames = [] |
|
thumbnail_old = [] |
|
|
|
while raw_video.isOpened(): |
|
ret, raw_frame = raw_video.read() |
|
if not ret: |
|
break |
|
else: |
|
print(count) |
|
|
|
|
depth = predict_depth(raw_frame[:, :, ::-1], model) |
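                # Depth Anything predicts relative (inverse) depth, so larger values are
                # nearer; normalize to a 0..255 gray map (brightest = nearest), then
                # suppress near-saturated artifacts in the top 7/8 of the panorama and
                # cap the bottom strip (typically the capture vehicle) at level 192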
|
                depth_gray = ((depth - depth.min()) / (depth.max() - depth.min()) * 255.0).astype(np.uint8)

                mask = cv2.inRange(depth_gray[0:int(depth_gray.shape[0]/8*7)-1, :], 250, 255)
                depth_gray[0:int(depth_gray.shape[0]/8*7)-1, :][mask > 0] = 0

                mask = cv2.inRange(depth_gray[int(depth_gray.shape[0]/8*7):, :], 192, 255)
                depth_gray[int(depth_gray.shape[0]/8*7):, :][mask > 0] = 192
|
|
|
                depth_color = cv2.cvtColor(depth_gray, cv2.COLOR_GRAY2BGR)

|
if cframes < 16: |
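                    # short clips only: skip near-duplicate frames, i.e. when more than
                    # 80% of the 16x32 gray thumbnail matches the previous kept frame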
|
thumbnail = cv2.cvtColor(cv2.resize(raw_frame, (16,32)), cv2.COLOR_BGR2GRAY).flatten() |
|
if len(thumbnail_old) > 0: |
|
diff = thumbnail - thumbnail_old |
|
|
|
c = Counter(diff) |
|
value, cc = c.most_common()[0] |
|
if value == 0 and cc > int(16*32*0.8): |
|
count += 1 |
|
continue |
|
thumbnail_old = thumbnail |
|
|
|
blur_frame = blur_image(raw_frame, depth_color, blur_data) |
|
|
|
cv2.imwrite(f"f{count}.jpg", blur_frame) |
|
orig_frames.append(f"f{count}.jpg") |
|
|
|
cv2.imwrite(f"f{count}_.jpg", blur_frame) |
|
backup_frames.append(f"f{count}_.jpg") |
|
|
|
cv2.imwrite(f"f{count}_dmap.jpg", depth_color) |
|
depth_frames.append(f"f{count}_dmap.jpg") |
|
|
|
depth_gray = seg_frame(depth_gray, o, b) + 128 |
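                # seg_frame returns GrabCut seed labels (0..3); the +128 shift stores
                # them at mid-gray so draw_mask can recover them later via mask - 128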
|
|
|
|
|
cv2.imwrite(f"f{count}_mask.jpg", depth_gray) |
|
masks.append(f"f{count}_mask.jpg") |
|
count += 1 |
|
|
|
final_vid = create_video(orig_frames, frame_rate, "orig") |
|
depth_vid = create_video(depth_frames, frame_rate, "depth") |
|
|
|
final_zip = zip_files(orig_frames, depth_frames) |
|
raw_video.release() |
|
|
|
cv2.destroyAllWindows() |
|
|
|
global gradient |
|
global frame_selected |
|
global depths |
|
global frames |
|
global backups |
|
frames = orig_frames |
|
backups = backup_frames |
|
depths = depth_frames |
|
|
|
if depth_color.shape[0] == 2048: |
|
gradient = cv2.imread('./gradient_large.png').astype(np.uint8) |
|
elif depth_color.shape[0] == 1024: |
|
gradient = cv2.imread('./gradient.png').astype(np.uint8) |
|
else: |
|
gradient = cv2.imread('./gradient_small.png').astype(np.uint8) |
|
|
|
return final_vid, final_zip, frames, masks[frame_selected], depths, depth_vid |
|
|
|
def depth_edges_mask(depth): |
|
"""Returns a mask of edges in the depth map. |
|
Args: |
|
depth: 2D numpy array of shape (H, W) with dtype float32. |
|
Returns: |
|
mask: 2D numpy array of shape (H, W) with dtype bool. |
|
""" |
|
|
|
depth_dx, depth_dy = np.gradient(depth) |
|
|
|
depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2) |
|
|
|
mask = depth_grad > 0.05 |
|
return mask |
|
|
|
def pano_depth_to_world_points(depth):
    """Convert an equirectangular depth map into 3D world points.

    The 2D depth image is treated as an equirectangular projection of a
    sphere, with depth as the radius:
        longitude: 0 to 2*pi across the image width
        latitude:  0 to pi down the image height
    """

    # invert the gray depth so that bright (near) pixels get a small radius
    radius = (255 - depth.flatten())
|
|
|
lon = np.linspace(0, np.pi*2, depth.shape[1]) |
|
lat = np.linspace(0, np.pi, depth.shape[0]) |
|
lon, lat = np.meshgrid(lon, lat) |
|
lon = lon.flatten() |
|
lat = lat.flatten() |
|
|
|
pts3d = [[0,0,0]] |
|
uv = [[0,0]] |
|
nl = [[0,0,0]] |
|
for i in range(0, 1): |
|
for j in range(0, 1): |
|
|
|
|
|
d_lon = lon + i/2 * np.pi*2 / depth.shape[1] |
|
d_lat = lat + j/2 * np.pi / depth.shape[0] |
|
|
|
nx = np.cos(d_lon) * np.sin(d_lat) |
|
ny = np.cos(d_lat) |
|
nz = np.sin(d_lon) * np.sin(d_lat) |
|
|
|
|
|
x = radius * nx |
|
y = radius * ny |
|
z = radius * nz |
|
|
|
pts = np.stack([x, y, z], axis=1) |
|
uvs = np.stack([lon/np.pi/2, lat/np.pi], axis=1) |
|
nls = np.stack([-nx, -ny, -nz], axis=1) |
|
|
|
pts3d = np.concatenate((pts3d, pts), axis=0) |
|
uv = np.concatenate((uv, uvs), axis=0) |
|
nl = np.concatenate((nl, nls), axis=0) |
|
|
|
|
|
|
return [pts3d, uv, nl] |
|
|
|
def rgb2gray(rgb): |
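    # plain channel average (equal weights) rather than perceptual Rec. 601 luma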
|
return np.dot(rgb[...,:3], [0.333, 0.333, 0.333]) |
|
|
|
def get_mesh(image, depth, blur_data, loadall): |
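    """Prepare the 3D view for the selected frame.

    The displacement mapping itself happens client-side in Babylon.js; this
    currently returns a placeholder glTF plus the list of loaded frame ids.
    """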
|
global depths |
|
global pcolors |
|
global frame_selected |
|
global mesh |
|
global mesh_n |
|
global scene |
|
    if not loadall:
        mesh = []
        mesh_n = []
    fnum = frame_selected

|
depthc = cv2.imread(depths[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8) |
|
blur_img = blur_image(cv2.imread(image[fnum][0], cv2.IMREAD_UNCHANGED).astype(np.uint8), depthc, blur_data) |
|
gdepth = cv2.cvtColor(depthc, cv2.COLOR_RGB2GRAY) |
|
|
|
print('depth to gray - ok') |
|
points = pano_depth_to_world_points(gdepth) |
|
pts3d = points[0] |
|
uv = points[1] |
|
nl = points[2] |
|
    print('radius from depth - ok')

|
uvs = uv.reshape(-1, 2) |
|
|
|
|
|
verts = [[0,0,0]] |
|
normals = nl.reshape(-1, 3) |
|
rgba = cv2.cvtColor(blur_img, cv2.COLOR_RGB2RGBA) |
|
colors = rgba.reshape(-1, 4) |
|
    clrs = [[128,128,128,0]]

    if str(fnum) not in mesh_n:
        mesh_n.append(str(fnum))
|
    print('mesh - ok')

|
return "./TriangleWithoutIndices.gltf", ",".join(mesh_n) |
|
|
|
|
|
def blur_image(image, depth, blur_data): |
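    """Depth-of-field blur: blur_data holds 256 space-separated odd kernel sizes,
    one per depth level starting from level 255 (nearest). Each depth band of the
    image is replaced by a correspondingly blurred copy, and pure white pixels are
    nudged to 254 so that white stays reserved as the selection color of draw_mask.
    """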
|
blur_a = blur_data.split() |
|
|
|
|
|
blur_frame = image.copy() |
|
    for j in range(256):
        i = 255 - j
        blur_lo = np.array([i, i, i])
        blur_hi = np.array([i+1, i+1, i+1])
        blur_mask = cv2.inRange(depth, blur_lo, blur_hi)

        blur = cv2.GaussianBlur(image, (int(blur_a[j]), int(blur_a[j])), 0)

        blur_frame[blur_mask > 0] = blur[blur_mask > 0]
|
|
|
white = cv2.inRange(blur_frame, np.array([255,255,255]), np.array([255,255,255])) |
|
blur_frame[white>0] = (254,254,254) |
|
|
|
return blur_frame |
|
|
|
def loadfile(f): |
|
return f |
|
|
|
def show_json(txt): |
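    """Unpack a serialized outputs payload (JSON) back into the individual component values."""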
|
data = json.loads(txt) |
|
print(txt) |
|
    for i in range(len(data[2])):
        data[2][i] = data[2][i]["image"]["path"]
        data[4][i] = data[4][i]["path"]
|
return data[0]["video"]["path"], data[1]["path"], data[2], data[3]["background"]["path"], data[4], data[5] |
|
|
|
|
|
def seg_frame(newmask, b, d): |
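    """Build a GrabCut seed mask from a gray depth frame.

    Pixels close to the reference gradient image are cleared, the rest is
    Otsu-thresholded; erosion by b marks definite foreground (label 1) and
    dilation by d marks probable foreground (label 3).
    """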
|
|
|
if newmask.shape[0] == 2048: |
|
gd = cv2.imread('./gradient_large.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
elif newmask.shape[0] == 1024: |
|
gd = cv2.imread('./gradient.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
else: |
|
gd = cv2.imread('./gradient_small.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
|
|
newmask[np.absolute(newmask.astype(np.int16)-gd.astype(np.int16))<16] = 0 |
|
ret,newmask = cv2.threshold(newmask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) |
|
|
|
|
|
|
|
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * b + 1, 2 * b + 1), (b, b)) |
|
bd = cv2.erode(newmask, element) |
|
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * d + 1, 2 * d + 1), (d, d)) |
|
bg = cv2.dilate(newmask, element) |
|
    bg[-64:, :] = 0
|
|
|
mask = np.zeros(newmask.shape[:2],np.uint8) |
|
|
|
|
|
|
|
mask[bg == 255] = 3 |
|
mask[bd == 255] = 1 |
|
|
|
return mask |
|
|
|
|
|
def select_frame(d, evt: gr.SelectData): |
|
global frame_selected |
|
global depths |
|
global masks |
|
global edge |
|
|
|
if evt.index != frame_selected: |
|
edge = [] |
|
frame_selected = evt.index |
|
|
|
return depths[frame_selected], frame_selected |
|
|
|
def switch_rows(v): |
|
global frames |
|
global depths |
|
    if v:
|
print(depths[0]) |
|
return depths |
|
else: |
|
print(frames[0]) |
|
return frames |
|
|
|
|
|
def bincount(a): |
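    """Most frequent color in an image, found with a single bincount over packed pixels."""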
|
a2D = a.reshape(-1,a.shape[-1]) |
|
col_range = (256, 256, 256) |
|
a1D = np.ravel_multi_index(a2D.T, col_range) |
|
return list(reversed(np.unravel_index(np.bincount(a1D).argmax(), col_range))) |
|
|
|
def reset_mask(d): |
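    """Restore the selected frame from its backup and clear the editor's drawing layer."""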
|
global frame_selected |
|
global frames |
|
global backups |
|
global masks |
|
global depths |
|
global edge |
|
|
|
edge = [] |
|
backup = cv2.imread(backups[frame_selected]).astype(np.uint8) |
|
cv2.imwrite(frames[frame_selected], backup) |
|
|
|
d["layers"][0][0:d["layers"][0].shape[0], 0:d["layers"][0].shape[1]] = (0,0,0,0) |
|
|
|
return gr.ImageEditor(value=d) |
|
|
|
|
|
def draw_mask(o, b, v, d, evt: gr.EventData): |
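    """Flood-fill the stored segmentation mask at the last clicked point, refine the
    selection with GrabCut against the depth background, and paint the selected
    region white into the current frame."""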
|
global frames |
|
global depths |
|
global params |
|
global frame_selected |
|
global masks |
|
global gradient |
|
global edge |
|
|
|
points = json.loads(v) |
|
pts = np.array(points, np.int32) |
|
pts = pts.reshape((-1,1,2)) |
|
|
|
if len(edge) == 0 or params["fnum"] != frame_selected: |
|
if params["fnum"] != frame_selected: |
|
d["background"] = cv2.imread(depths[frame_selected]).astype(np.uint8) |
|
params["fnum"] = frame_selected |
|
|
|
bg = cv2.cvtColor(d["background"], cv2.COLOR_RGBA2GRAY) |
|
bg[bg==255] = 0 |
|
|
|
edge = bg.copy() |
|
else: |
|
bg = edge.copy() |
|
|
|
    x = points[-1][0]
    y = points[-1][1]
|
|
|
mask = cv2.imread(masks[frame_selected], cv2.IMREAD_GRAYSCALE).astype(np.uint8) |
|
mask[mask==128] = 0 |
|
print(mask[mask>0]-128) |
|
d["layers"][0] = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGBA) |
|
|
|
sel = cv2.floodFill(mask, None, (x, y), 1, 2, 2, (4 | cv2.FLOODFILL_FIXED_RANGE))[2] |
|
|
|
sel = sel[1:sel.shape[0]-1, 1:sel.shape[1]-1] |
|
|
|
d["layers"][0][sel==0] = (0,0,0,0) |
|
|
|
|
|
mask = cv2.cvtColor(d["layers"][0], cv2.COLOR_RGBA2GRAY) |
|
mask[mask==0] = 128 |
|
print(mask[mask>128]-128) |
|
    mask, bgdModel, fgdModel = cv2.grabCut(cv2.cvtColor(d["background"], cv2.COLOR_RGBA2RGB), mask - 128, None, None, None, 15, cv2.GC_INIT_WITH_MASK)
|
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8') |
|
|
|
frame = cv2.imread(frames[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8) |
|
frame[mask>0] = (255,255,255) |
|
cv2.imwrite(frames[frame_selected], frame) |
|
|
|
switch_rows(False) |
|
return gr.ImageEditor(value=d) |
|
|
|
|
|
load_model=""" |
|
async(c, o, p, d, n, m, s)=>{ |
|
var intv = setInterval(function(){ |
|
if (document.getElementById("model3D").getElementsByTagName("canvas")[0]) { |
|
try { |
|
if (typeof BABYLON !== "undefined" && BABYLON.Engine && BABYLON.Engine.LastCreatedScene) { |
|
BABYLON.Engine.LastCreatedScene.onAfterRenderObservable.add(function() { //onDataLoadedObservable |
|
|
|
var then = new Date().getTime(); |
|
var now, delta; |
|
const interval = 1000 / 25; |
|
const tolerance = 0.1; |
|
|
|
BABYLON.Engine.LastCreatedScene.getEngine().stopRenderLoop(); |
|
BABYLON.Engine.LastCreatedScene.getEngine().runRenderLoop(function () { |
|
now = new Date().getTime(); |
|
delta = now - then; |
|
then = now - (delta % interval); |
|
if (delta >= interval - tolerance) { |
|
BABYLON.Engine.LastCreatedScene.render(); |
|
} |
|
}); |
|
|
|
BABYLON.Engine.LastCreatedScene.getEngine().setHardwareScalingLevel(1.0); |
|
BABYLON.Engine.LastCreatedScene.clearColor = new BABYLON.Color4(255,255,255,255); |
|
BABYLON.Engine.LastCreatedScene.ambientColor = new BABYLON.Color4(255,255,255,255); |
|
//BABYLON.Engine.LastCreatedScene.autoClear = false; |
|
//BABYLON.Engine.LastCreatedScene.autoClearDepthAndStencil = false; |
|
/*for (var i=0; i<BABYLON.Engine.LastCreatedScene.getNodes().length; i++) { |
|
if (BABYLON.Engine.LastCreatedScene.getNodes()[i].material) { |
|
BABYLON.Engine.LastCreatedScene.getNodes()[i].material.pointSize = Math.ceil(Math.log2(Math.PI/document.getElementById("zoom").value)); |
|
} |
|
}*/ |
|
BABYLON.Engine.LastCreatedScene.getAnimationRatio(); |
|
}); |
|
|
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata = { |
|
pipeline: new BABYLON.DefaultRenderingPipeline("default", true, BABYLON.Engine.LastCreatedScene, [BABYLON.Engine.LastCreatedScene.activeCamera]) |
|
} |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.samples = 4; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = 1.0; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = 1.0; |
|
|
|
//BABYLON.Engine.LastCreatedScene.activeCamera.detachControl(document.getElementById("model3D").getElementsByTagName("canvas")[0]); |
|
BABYLON.Engine.LastCreatedScene.activeCamera.inertia = 0.0; |
|
//pan |
|
BABYLON.Engine.LastCreatedScene.activeCamera.panningInertia = 0.0; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.panningDistanceLimit = 16; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.panningSensibility = 16; |
|
//zoom |
|
BABYLON.Engine.LastCreatedScene.activeCamera.pinchDeltaPercentage = 1/256; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.wheelDeltaPercentage = 1/256; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.upperRadiusLimit = (1.57-0.157)*16; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.lowerRadiusLimit = 0.0; |
|
//BABYLON.Engine.LastCreatedScene.activeCamera.attachControl(document.getElementById("model3D").getElementsByTagName("canvas")[0], false); |
|
|
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = document.getElementById("zoom").value; |
|
|
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].style.filter = "blur(" + Math.ceil(Math.log2(Math.PI/document.getElementById("zoom").value))/2.0*Math.sqrt(2.0) + "px)"; |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].oncontextmenu = function(e){e.preventDefault();} |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].ondrag = function(e){e.preventDefault();} |
|
|
|
document.getElementById("model3D").appendChild(document.getElementById("compass_box")); |
|
window.coords = JSON.parse(document.getElementById("coords").getElementsByTagName("textarea")[0].value); |
|
window.counter = 0; |
|
|
|
if (o.indexOf(""+n) < 0) { |
|
if (o != "") { o += ","; } |
|
o += n; |
|
} |
|
//alert(o); |
|
var o_ = o.split(","); |
|
var q = BABYLON.Engine.LastCreatedScene.meshes; |
|
              for (var i = 0; i < q.length; i++) {
|
let mesh = q[i]; |
|
mesh.dispose(false, true); |
|
} |
|
var dome = []; |
|
/*for (var j=0; j<o_.length; j++) { |
|
o_[j] = parseInt(o_[j]); |
|
dome[j] = new BABYLON.PhotoDome("dome"+j, p[o_[j]].image.url, |
|
{ |
|
resolution: 16, |
|
size: 512 |
|
}, BABYLON.Engine.LastCreatedScene); |
|
var q = BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-2]._children; |
|
for(i = 0; i < q.length; i++) { |
|
let mesh = q[i]; |
|
mesh.dispose(false, true); |
|
} |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].name = "dome"+j; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].scaling.z = -1; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].alphaIndex = o_.length-j; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].visibility = 0.9999; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.diffuseTexture.hasAlpha = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.useAlphaFromDiffuseTexture = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].applyDisplacementMap(m[o_[j]].url, 0, 255, function(m){try{alert(BABYLON.Engine.Version);}catch(e){alert(e);}}, null, null, true, function(e){alert(e);}); |
|
|
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].rotationQuaternion = null; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].position.z = coords[o_[j]].lat; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].position.x = coords[o_[j]].lng; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].rotation.y = coords[o_[j]].heading / 180 * Math.PI; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].rotation.z = -coords[o_[j]].pitch / 180 * Math.PI; |
|
}*/ |
|
|
|
if (s == false) { |
|
v_url = document.getElementById("output_video").getElementsByTagName("video")[0].src; |
|
} else { |
|
v_url = document.getElementById("depth_video").getElementsByTagName("video")[0].src; |
|
} |
|
window.videoDome = new BABYLON.VideoDome( |
|
"videoDome", [v_url], |
|
{ |
|
resolution: 16, |
|
size: 512, |
|
clickToPlay: false, |
|
}, BABYLON.Engine.LastCreatedScene |
|
); |
|
var q = BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-2]._children; |
|
              for (var i = 0; i < q.length; i++) {
|
let mesh = q[i]; |
|
mesh.dispose(false, true); |
|
} |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].rotationQuaternion = null; |
|
//BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].position.z = coords[counter].lat; |
|
//BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].position.x = coords[counter].lng; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].rotation.y = coords[counter].heading / 180 * Math.PI; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].rotation.z = -coords[counter].pitch / 180 * Math.PI; |
|
|
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].scaling.z = -1; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.diffuseTexture.hasAlpha = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.useAlphaFromDiffuseTexture = true; |
|
//BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.emissiveTexture = videoDome.videoTexture; |
|
//BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.emissiveTexture.hasAlpha = true; |
|
//BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].material.useAlphaFromEmissiveTexture = true; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].alphaIndex = 1; |
|
BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1].visibility = 0.9999; |
|
|
|
window.md = false; |
|
window.rd = false; |
|
window.compass = document.getElementById("compass"); |
|
window.x = 0; |
|
window.y = 0; |
|
window.xold = 0; |
|
window.yold = 0; |
|
window.buffer = null; |
|
window.bufferCanvas = document.createElement("canvas"); |
|
window.ctx = bufferCanvas.getContext("2d", { willReadFrequently: true }); |
|
window.video = document.getElementById("depth_video").getElementsByTagName("video")[0]; |
|
window.parallax = 0; |
|
window.xdir = new BABYLON.Vector3(1, 0, 0); |
|
window.rdir = new BABYLON.Vector3(0, 0, 0); |
|
window.videoDomeMesh = BABYLON.Engine.LastCreatedScene.meshes[BABYLON.Engine.LastCreatedScene.meshes.length-1]; |
|
|
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('pointermove', function(evt) { |
|
if (md === true) { |
|
rdir = BABYLON.Engine.LastCreatedScene.activeCamera.getDirection(xdir); |
|
videoDomeMesh.position.x = parallax * rdir.x; |
|
videoDomeMesh.position.z = parallax * rdir.z; |
|
|
|
try { |
|
compass.style.transform = "rotateX(" + (BABYLON.Engine.LastCreatedScene.activeCamera.beta-Math.PI/2) + "rad) rotateZ(" + BABYLON.Engine.LastCreatedScene.activeCamera.alpha + "rad)"; |
|
} catch(e) {alert(e);} |
|
} |
|
if (rd === true) { |
|
x = parseInt(evt.clientX - evt.target.getBoundingClientRect().x); |
|
y = parseInt(evt.clientY - evt.target.getBoundingClientRect().y); |
|
|
|
if (Math.abs(BABYLON.Engine.LastCreatedScene.activeCamera.radius) > (1.57-0.157)*16) { |
|
BABYLON.Engine.LastCreatedScene.activeCamera.radius = (1.57-0.157)*16; |
|
} else { |
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = BABYLON.Engine.LastCreatedScene.activeCamera.radius/16 + 0.157; |
|
} |
|
document.getElementById('zoom').value = BABYLON.Engine.LastCreatedScene.activeCamera.fov; |
|
document.getElementById('zoom').parentNode.childNodes[2].innerText = document.getElementById('zoom').value; |
|
|
|
xold=x; |
|
yold=y; |
|
} |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('pointerdown', function() { |
|
md = true; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('pointerup', function() { |
|
md = false; |
|
rd = false; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('pointercancel', function() { |
|
md = false; |
|
rd = false; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('pointerleave', function() { |
|
md = false; |
|
rd = false; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('pointerout', function() { |
|
md = false; |
|
rd = false; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('contextmenu', function() { |
|
rd = true; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('gesturestart', function() { |
|
rd = true; |
|
}); |
|
document.getElementById("model3D").getElementsByTagName("canvas")[0].addEventListener('gestureend', function() { |
|
rd = false; |
|
}); |
|
|
|
|
|
function requestMap() { |
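              // double-buffered displacement: undo the previous depth frame
              // (maxHeight -1) before applying the new one (maxHeight 1), so vertex
              // positions track the current video frame instead of accumulating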
|
try { |
|
ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight); |
|
videoDome.videoTexture.video.pause(); |
|
video.pause(); |
|
if (buffer) { |
|
counter = parseInt(video.currentTime); |
|
if (!coords[counter]) {counter = coords.length-1;} |
|
applyDisplacementMapFromBuffer(videoDomeMesh, buffer, video.videoWidth, video.videoHeight, 0, -1, null, null, true); |
|
} |
|
buffer = ctx.getImageData(0, 0, video.videoWidth, video.videoHeight).data; |
|
applyDisplacementMapFromBuffer(videoDomeMesh, buffer, video.videoWidth, video.videoHeight, 0, 1, null, null, true); |
|
} catch(e) {alert(e)} |
|
} |
|
window.requestMap = requestMap; |
|
|
|
videoDome.videoTexture.video.oncanplaythrough = function () { |
|
document.getElementById('seek').innerHTML = ''; |
|
for (var i=0; i<videoDome.videoTexture.video.duration; i++) { |
|
document.getElementById('seek').innerHTML += '<a href="#" style="position:absolute;left:'+(56+coords[i].lng/2)+'px;top:'+(56-coords[i].lat/2)+'px;" onclick="seek('+i+');">-'+i+'-</a> '; |
|
} |
|
bufferCanvas.width = video.videoWidth; |
|
bufferCanvas.height = video.videoHeight; |
|
|
|
videoPlay(); |
|
}; |
|
|
|
//var debugLayer = BABYLON.Engine.LastCreatedScene.debugLayer.show(); |
|
|
|
if (document.getElementById("model")) { |
|
document.getElementById("model").appendChild(document.getElementById("model3D")); |
|
toggleDisplay("model"); |
|
} |
|
|
|
clearInterval(intv); |
|
} |
|
} catch(e) {alert(e);} |
|
} |
|
}, 40); |
|
} |
|
""" |
|
|
|
js = """ |
|
async()=>{ |
|
console.log('Hi'); |
|
|
|
const chart = document.getElementById('chart'); |
|
const blur_in = document.getElementById('blur_in').getElementsByTagName('textarea')[0]; |
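      // the chart is a 256-column bar editor: each <hr> is one depth level whose
      // bar height encodes the blur kernel size mirrored into the blur_in textarea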
|
var md = false; |
|
var xold = 128; |
|
var yold = 32; |
|
var a = new Array(256); |
|
var l; |
|
|
|
for (var i=0; i<256; i++) { |
|
const hr = document.createElement('hr'); |
|
hr.style.backgroundColor = 'hsl(0,0%,' + (100-i/256*100) + '%)'; |
|
chart.appendChild(hr); |
|
} |
|
|
|
function resetLine() { |
|
a.fill(1); |
|
for (var i=0; i<256; i++) { |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = '32px'; |
|
} |
|
} |
|
resetLine(); |
|
window.resetLine = resetLine; |
|
|
|
function pointerDown(x, y) { |
|
md = true; |
|
xold = parseInt(x - chart.getBoundingClientRect().x); |
|
yold = parseInt(y - chart.getBoundingClientRect().y); |
|
chart.title = xold + ',' + yold; |
|
} |
|
window.pointerDown = pointerDown; |
|
|
|
function pointerUp() { |
|
md = false; |
|
var evt = document.createEvent('Event'); |
|
evt.initEvent('input', true, false); |
|
blur_in.dispatchEvent(evt); |
|
chart.title = ''; |
|
} |
|
window.pointerUp = pointerUp; |
|
|
|
function lerp(y1, y2, mu) { return y1*(1-mu)+y2*mu; } |
|
|
|
function drawLine(x, y) { |
|
x = parseInt(x - chart.getBoundingClientRect().x); |
|
y = parseInt(y - chart.getBoundingClientRect().y); |
|
if (md === true && y >= 0 && y < 64 && x >= 0 && x < 256) { |
|
if (y < 32) { |
|
a[x] = Math.abs(32-y)*2 + 1; |
|
chart.childNodes[x].style.height = a[x] + 'px'; |
|
chart.childNodes[x].style.marginTop = y + 'px'; |
|
|
|
for (var i=Math.min(xold, x)+1; i<Math.max(xold, x); i++) { |
|
l = parseInt(lerp( yold, y, (i-xold)/(x-xold) )); |
|
|
|
if (l < 32) { |
|
a[i] = Math.abs(32-l)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = l + 'px'; |
|
} else if (l < 64) { |
|
a[i] = Math.abs(l-32)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = (64-l) + 'px'; |
|
} |
|
} |
|
} else if (y < 64) { |
|
a[x] = Math.abs(y-32)*2 + 1; |
|
chart.childNodes[x].style.height = a[x] + 'px'; |
|
chart.childNodes[x].style.marginTop = (64-y) + 'px'; |
|
|
|
for (var i=Math.min(xold, x)+1; i<Math.max(xold, x); i++) { |
|
l = parseInt(lerp( yold, y, (i-xold)/(x-xold) )); |
|
|
|
if (l < 32) { |
|
a[i] = Math.abs(32-l)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = l + 'px'; |
|
} else if (l < 64) { |
|
a[i] = Math.abs(l-32)*2 + 1; |
|
chart.childNodes[i].style.height = a[i] + 'px'; |
|
chart.childNodes[i].style.marginTop = (64-l) + 'px'; |
|
} |
|
} |
|
} |
|
blur_in.value = a.join(' '); |
|
xold = x; |
|
yold = y; |
|
chart.title = xold + ',' + yold; |
|
} |
|
} |
|
window.drawLine = drawLine; |
|
|
|
|
|
window.screenshot = false; |
|
|
|
function snapshot() { |
|
if (BABYLON) { |
|
screenshot = true; |
|
BABYLON.Engine.LastCreatedScene.getEngine().onEndFrameObservable.add(function() { |
|
if (screenshot === true) { |
|
screenshot = false; |
|
try { |
|
BABYLON.Tools.CreateScreenshotUsingRenderTarget(BABYLON.Engine.LastCreatedScene.getEngine(), BABYLON.Engine.LastCreatedScene.activeCamera, |
|
{ precision: 1.0 }, (durl) => { |
|
var cnvs = document.getElementById("model3D").getElementsByTagName("canvas")[0]; //.getContext("webgl2"); |
|
var svgd = `<svg id="svg_out" viewBox="0 0 ` + cnvs.width + ` ` + cnvs.height + `" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> |
|
<defs> |
|
<filter id="blur" x="0" y="0" xmlns="http://www.w3.org/2000/svg"> |
|
<feGaussianBlur in="SourceGraphic" stdDeviation="1" /> |
|
</filter> |
|
</defs> |
|
<image filter="url(#blur)" id="svg_img" x="0" y="0" width="` + cnvs.width + `" height="` + cnvs.height + `" xlink:href=\"` + durl + `\"/> |
|
</svg>`; |
|
document.getElementById("cnv_out").width = cnvs.width; |
|
document.getElementById("cnv_out").height = cnvs.height; |
|
document.getElementById("img_out").src = "data:image/svg+xml;base64," + btoa(svgd); |
|
} |
|
); |
|
} catch(e) { alert(e); } |
|
// https://forum.babylonjs.com/t/best-way-to-save-to-jpeg-snapshots-of-scene/17663/11 |
|
} |
|
}); |
|
} |
|
} |
|
window.snapshot = snapshot; |
|
|
|
|
|
window.recorder = null; |
|
|
|
function record_video() { |
|
try { |
|
if (BABYLON.VideoRecorder.IsSupported(BABYLON.Engine.LastCreatedScene.getEngine()) && (recorder == null || !recorder.isRecording) ) { |
|
if (recorder == null) { |
|
recorder = new BABYLON.VideoRecorder(BABYLON.Engine.LastCreatedScene.getEngine(), { mimeType:'video/mp4', fps:25, /*audioTracks: mediaStreamDestination.stream.getAudioTracks()*/ }); |
|
} |
|
recorder.startRecording('video.mp4', 60*60); |
|
} |
|
} catch(e) {alert(e);} |
|
} |
|
window.record_video = record_video; |
|
|
|
function stop_recording() { |
|
if (recorder.isRecording) { |
|
recorder.stopRecording(); |
|
} |
|
} |
|
window.stop_recording = stop_recording; |
|
|
|
function seek(t) { |
|
videoDome.videoTexture.video.currentTime = t; |
|
if (videoDome.videoTexture.video.currentTime > videoDome.videoTexture.video.duration) { |
|
videoDome.videoTexture.video.currentTime = videoDome.videoTexture.video.duration; |
|
} else if (videoDome.videoTexture.video.currentTime < 0) { |
|
videoDome.videoTexture.video.currentTime = 0; |
|
} |
|
video.currentTime = t; |
|
if (video.currentTime > video.duration) { |
|
video.currentTime = video.duration; |
|
} else if (video.currentTime < 0) { |
|
video.currentTime = 0; |
|
} |
|
requestMap(); |
|
} |
|
window.seek = seek; |
|
|
|
function videoPlay() { |
|
videoDome.videoTexture.video.oncanplaythrough = null; |
|
video.oncanplaythrough = null; |
|
|
|
videoDome.videoTexture.video.loop = true; |
|
video.loop = true; |
|
videoDome.videoTexture.video.play(); |
|
video.play(); |
|
} |
|
window.videoPlay = videoPlay; |
|
|
|
|
|
function applyDisplacementMapFromBuffer( |
|
mesh, |
|
buffer, |
|
heightMapWidth, |
|
heightMapHeight, |
|
minHeight, |
|
maxHeight, |
|
uvOffset, |
|
uvScale, |
|
forceUpdate |
|
) { |
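        // port of Babylon's Mesh.applyDisplacementMapFromBuffer: displaces each
        // vertex along its normal by the average RGB value sampled from the buffer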
|
try { |
|
if (!mesh.isVerticesDataPresent(BABYLON.VertexBuffer.NormalKind)) { |
|
let positions = mesh.getVerticesData(BABYLON.VertexBuffer.PositionKind); |
|
let normals = []; |
|
BABYLON.VertexData.ComputeNormals(positions, mesh.getIndices(), normals, {useRightHandedSystem: true}); |
|
mesh.setVerticesData(BABYLON.VertexBuffer.NormalKind, normals); |
|
} |
|
const positions = mesh.getVerticesData(BABYLON.VertexBuffer.PositionKind, true, true); |
|
const normals = mesh.getVerticesData(BABYLON.VertexBuffer.NormalKind); |
|
const uvs = mesh.getVerticesData(BABYLON.VertexBuffer.UVKind); |
|
|
|
let position = BABYLON.Vector3.Zero(); |
|
const normal = BABYLON.Vector3.Zero(); |
|
const uv = BABYLON.Vector2.Zero(); |
|
|
|
uvOffset = uvOffset || BABYLON.Vector2.Zero(); |
|
uvScale = uvScale || new BABYLON.Vector2(1, 1); |
|
|
|
for (let index = 0; index < positions.length; index += 3) { |
|
BABYLON.Vector3.FromArrayToRef(positions, index, position); |
|
BABYLON.Vector3.FromArrayToRef(normals, index, normal); |
|
BABYLON.Vector2.FromArrayToRef(uvs, (index / 3) * 2, uv); |
|
|
|
// Compute height |
|
const u = (Math.abs(uv.x * uvScale.x + (uvOffset.x % 1)) * (heightMapWidth - 1)) % heightMapWidth | 0; |
|
const v = (Math.abs(uv.y * uvScale.y + (uvOffset.y % 1)) * (heightMapHeight - 1)) % heightMapHeight | 0; |
|
|
|
const pos = (u + v * heightMapWidth) * 4; |
|
const r = buffer[pos] / 255.0; |
|
const g = buffer[pos + 1] / 255.0; |
|
const b = buffer[pos + 2] / 255.0; |
|
const a = buffer[pos + 3] / 255.0; |
|
|
|
const gradient = r * 0.33 + g * 0.33 + b * 0.33; |
|
//const gradient = a; |
|
|
|
normal.normalize(); |
|
normal.scaleInPlace(minHeight + (maxHeight - minHeight) * gradient); |
|
position = position.add(normal); |
|
|
|
position.toArray(positions, index); |
|
} |
|
mesh.setVerticesData(BABYLON.VertexBuffer.PositionKind, positions); |
|
|
|
return mesh; |
|
} catch(e) {alert(e)} |
|
} |
|
window.applyDisplacementMapFromBuffer = applyDisplacementMapFromBuffer; |
|
|
|
|
|
var intv_ = setInterval(function(){ |
|
if (document.getElementById("image_edit") && document.getElementById("image_edit").getElementsByTagName("canvas")) { |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].oncontextmenu = function(e){e.preventDefault();} |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].ondrag = function(e){e.preventDefault();} |
|
|
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onclick = function(e) { |
|
var x = parseInt((e.clientX-e.target.getBoundingClientRect().x)*e.target.width/e.target.getBoundingClientRect().width); |
|
var y = parseInt((e.clientY-e.target.getBoundingClientRect().y)*e.target.height/e.target.getBoundingClientRect().height); |
|
|
|
var p = document.getElementById("mouse").getElementsByTagName("textarea")[0].value.slice(1, -1); |
|
if (p != "") { p += ", "; } |
|
p += "[" + x + ", " + y + "]"; |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[" + p + "]"; |
|
|
|
var evt = document.createEvent("Event"); |
|
evt.initEvent("input", true, false); |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].dispatchEvent(evt); |
|
} |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onfocus = function(e) { |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[]"; |
|
} |
|
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onblur = function(e) { |
|
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[]"; |
|
} |
|
clearInterval(intv_); |
|
} |
|
}, 40); |
|
|
|
} |
|
""" |
|
|
|
css = """ |
|
#img-display-container { |
|
max-height: 100vh; |
|
} |
|
#img-display-input { |
|
max-height: 80vh; |
|
} |
|
#img-display-output { |
|
max-height: 80vh; |
|
} |
|
""" |
|
|
|
head = """ |
|
""" |
|
|
|
title = "# Depth Anything V2 Video" |
|
description = """**Depth Anything V2** on full video files, intended for Google Street View panorama slideshows. |
|
Please refer to the [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), and [github](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""

|
with gr.Blocks(css=css, js=js, head=head) as demo: |
|
gr.Markdown(title) |
|
gr.Markdown(description) |
|
gr.Markdown("### Video Depth Prediction demo") |
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Group(): |
|
input_json = gr.Textbox(elem_id="json_in", value="{}", label="JSON", interactive=False) |
|
input_url = gr.Textbox(elem_id="url_in", value="./examples/streetview.mp4", label="URL") |
|
input_video = gr.Video(label="Input Video", format="mp4") |
|
input_url.input(fn=loadfile, inputs=[input_url], outputs=[input_video]) |
|
submit = gr.Button("Submit") |
|
with gr.Group(): |
|
output_frame = gr.Gallery(label="Frames", preview=True, columns=8192, interactive=False) |
|
output_switch = gr.Checkbox(label="Show depths") |
|
output_switch.input(fn=switch_rows, inputs=[output_switch], outputs=[output_frame]) |
|
selected = gr.Number(label="Selected frame", visible=False, elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False) |
|
with gr.Accordion(label="Depths", open=False): |
|
output_depth = gr.Files(label="Depth files", interactive=False) |
|
with gr.Group(): |
|
                output_mask = gr.ImageEditor(layers=False, sources=["clipboard"], show_download_button=True, type="numpy", interactive=True, transforms=(), eraser=gr.Eraser(), brush=gr.Brush(default_size=0, colors=['black', '#505050', '#a0a0a0', 'white']), elem_id="image_edit")
|
with gr.Accordion(label="Border", open=False): |
|
boffset = gr.Slider(label="Inner", value=1, maximum=256, minimum=0, step=1) |
|
bsize = gr.Slider(label="Outer", value=32, maximum=256, minimum=0, step=1) |
|
mouse = gr.Textbox(label="Mouse x,y", elem_id="mouse", value="""[]""", interactive=False) |
|
reset = gr.Button("Reset", size='sm') |
|
mouse.input(fn=draw_mask, show_progress="minimal", inputs=[boffset, bsize, mouse, output_mask], outputs=[output_mask]) |
|
reset.click(fn=reset_mask, inputs=[output_mask], outputs=[output_mask]) |
|
|
|
with gr.Column(): |
|
model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl"), ("giant", "vitg")], type="value", value="vits", label='Model Type') |
|
processed_video = gr.Video(label="Output Video", format="mp4", elem_id="output_video", interactive=False) |
|
processed_zip = gr.File(label="Output Archive", interactive=False) |
|
depth_video = gr.Video(label="Depth Video", format="mp4", elem_id="depth_video", interactive=False, visible=True) |
|
result = gr.Model3D(label="3D Mesh", clear_color=[0.5, 0.5, 0.5, 0.0], camera_position=[0, 90, 512], zoom_speed=2.0, pan_speed=2.0, interactive=True, elem_id="model3D") |
|
with gr.Accordion(label="Embed in website", open=False): |
|
embed_model = gr.Textbox(elem_id="embed_model", label="Include this wherever the model is to appear on the page", interactive=False, value=""" |
|
|
|
""") |
|
|
|
with gr.Tab("Blur"): |
|
chart_c = gr.HTML(elem_id="chart_c", value="""<div id='chart' onpointermove='window.drawLine(event.clientX, event.clientY);' onpointerdown='window.pointerDown(event.clientX, event.clientY);' onpointerup='window.pointerUp();' onpointerleave='window.pointerUp();' onpointercancel='window.pointerUp();' onclick='window.resetLine();'></div> |
|
<style> |
|
* { |
|
user-select: none; |
|
} |
|
html, body { |
|
user-select: none; |
|
} |
|
#model3D canvas { |
|
user-select: none; |
|
} |
|
#chart hr { |
|
width: 1px; |
|
height: 1px; |
|
clear: none; |
|
border: 0; |
|
padding:0; |
|
display: inline-block; |
|
position: relative; |
|
vertical-align: top; |
|
margin-top:32px; |
|
} |
|
#chart { |
|
padding:0; |
|
margin:0; |
|
width:256px; |
|
height:64px; |
|
background-color:#808080; |
|
touch-action: none; |
|
} |
|
#compass_box { |
|
position:absolute; |
|
top:2em; |
|
right:3px; |
|
border:1px dashed gray; |
|
border-radius: 50%; |
|
width:1.5em; |
|
height:1.5em; |
|
padding:0; |
|
margin:0; |
|
} |
|
#compass { |
|
position:absolute; |
|
transform:rotate(0deg); |
|
border:1px solid black; |
|
border-radius: 50%; |
|
width:100%; |
|
height:100%; |
|
padding:0; |
|
margin:0; |
|
line-height:1em; |
|
letter-spacing:0; |
|
} |
|
#compass b { |
|
margin-top:-1px; |
|
} |
|
</style> |
|
""") |
|
average = gr.HTML(value="""<label for='average'>Average</label><input id='average' type='range' style='width:256px;height:1em;' value='1' min='1' max='15' step='2' onclick=' |
|
var pts_a = document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].value.split(\" \"); |
|
for (var i=0; i<256; i++) { |
|
var avg = 0; |
|
var div = this.value; |
|
for (var j = i-parseInt(this.value/2); j <= i+parseInt(this.value/2); j++) { |
|
if (pts_a[j]) { |
|
avg += parseInt(pts_a[j]); |
|
} else if (div > 1) { |
|
div--; |
|
} |
|
} |
|
pts_a[i] = Math.round((avg / div - 1) / 2) * 2 + 1; |
|
|
|
document.getElementById(\"chart\").childNodes[i].style.height = pts_a[i] + \"px\"; |
|
document.getElementById(\"chart\").childNodes[i].style.marginTop = (64-pts_a[i])/2 + \"px\"; |
|
} |
|
document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].value = pts_a.join(\" \"); |
|
|
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"input\", true, false); |
|
document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].dispatchEvent(evt); |
|
' oninput=' |
|
this.parentNode.childNodes[2].innerText = this.value; |
|
' onchange='this.click();'/><span>1</span>""") |
|
with gr.Accordion(label="Levels", open=False): |
|
blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=blurin) |
|
with gr.Group(): |
|
with gr.Accordion(label="Locations", open=False): |
|
output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected]) |
|
example_coords = """[ |
|
{"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997}, |
|
{"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064}, |
|
{"lat": 50.07377647505558, "lng": 14.437161000659017, "heading": 151.41025, "pitch": 3.4802200000000028}, |
|
{"lat": 50.07379496839027, "lng": 14.437148958238538, "heading": 151.93391, "pitch": 2.843050000000005}, |
|
{"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998} |
|
]""" |
|
coords = gr.Textbox(elem_id="coords", value=example_coords, label="Coordinates", interactive=False) |
|
mesh_order = gr.Textbox(elem_id="order", value="", label="Order", interactive=False) |
|
load_all = gr.Checkbox(label="Load all") |
|
|
|
with gr.Group(): |
|
camera = gr.HTML(value="""<div style='width:128px;height:128px;border:1px dotted gray;padding:0;margin:0;float:left;clear:none;' id='seek'></div> |
|
<span style='max-width:50%;float:right;clear:none;text-align:right;'> |
|
<a href='#' id='reset_cam' style='float:right;clear:none;color:white' onclick=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata = { |
|
screenshot: true, |
|
pipeline: new BABYLON.DefaultRenderingPipeline(\"default\", true, BABYLON.Engine.LastCreatedScene, [BABYLON.Engine.LastCreatedScene.activeCamera]) |
|
} |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.radius = 0; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.alpha = 0; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.beta = Math.PI / 2; |
|
|
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.samples = 4; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = document.getElementById(\"zoom\").value; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = document.getElementById(\"contrast\").value; |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = document.getElementById(\"exposure\").value; |
|
|
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].style.filter = \"blur(\" + Math.ceil(Math.log2(Math.PI/document.getElementById(\"zoom\").value))/2.0*Math.sqrt(2.0) + \"px)\"; |
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].oncontextmenu = function(e){e.preventDefault();} |
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].ondrag = function(e){e.preventDefault();} |
|
'>Reset camera</a><br/> |
|
<span><label for='zoom' style='width:8em'>Zoom</label><input id='zoom' type='range' style='width:128px;height:1em;' value='0.8' min='0.157' max='1.57' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.fov = this.value; |
|
this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.fov; |
|
|
|
document.getElementById(\"model3D\").getElementsByTagName(\"canvas\")[0].style.filter = \"blur(\" + BABYLON.Engine.LastCreatedScene.getNodes()[parseInt(document.getElementById(\"fnum\").getElementsByTagName(\"input\")[0].value)+1].material.pointSize/2.0*Math.sqrt(2.0) + \"px)\"; |
|
'/><span>0.8</span></span><br/> |
|
<span><label for='pan' style='width:8em'>Pan</label><input id='pan' type='range' style='width:128px;height:1em;' value='0' min='-16' max='16' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
parallax = this.value; |
|
rdir = BABYLON.Engine.LastCreatedScene.activeCamera.getDirection(xdir); |
|
videoDomeMesh.position.x = parallax * rdir.x; |
|
videoDomeMesh.position.z = parallax * rdir.z; |
|
this.parentNode.childNodes[2].innerText = parallax; |
|
'/><span>0.0</span></span><br/> |
|
<span><label for='contrast' style='width:8em'>Contrast</label><input id='contrast' type='range' style='width:128px;height:1em;' value='1.0' min='0' max='2' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = this.value; |
|
this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast; |
|
'/><span>1.0</span></span><br/> |
|
<span><label for='exposure' style='width:8em'>Exposure</label><input id='exposure' type='range' style='width:128px;height:1em;' value='1.0' min='0' max='2' step='0.001' oninput=' |
|
if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) { |
|
var evt = document.createEvent(\"Event\"); |
|
evt.initEvent(\"click\", true, false); |
|
document.getElementById(\"reset_cam\").dispatchEvent(evt); |
|
} |
|
BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = this.value; |
|
this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure; |
|
'/><span>1.0</span></span><br/> |
|
<a href='#' onclick='snapshot();'>Screenshot</a> |
|
<a href='#' onclick='record_video();'>Record</a> |
|
<a href='#' onclick='stop_recording();'>Stop rec.</a> |
|
<a href='#' onclick='videoPlay();'>Play</a></span>""") |
|
snapshot = gr.HTML(value="""<img src='' id='img_out' onload='var ctxt = document.getElementById(\"cnv_out\").getContext(\"2d\");ctxt.drawImage(this, 0, 0);'/><br/> |
|
<canvas id='cnv_out'></canvas> |
|
<div id='compass_box'><div id='compass'><a id='fullscreen' onclick=' |
|
const model3D = document.getElementById(\"model3D\"); |
|
if (model3D.parentNode.tagName != \"BODY\") { |
|
window.modelContainer = model3D.parentNode.id; |
|
document.body.appendChild(model3D); |
|
model3D.style.position = \"fixed\"; |
|
model3D.style.left = \"0\"; |
|
model3D.style.top = \"0\"; |
|
model3D.style.zIndex = \"100\"; |
|
document.getElementById(\"compass_box\").style.zIndex = \"101\"; |
|
} else { |
|
document.getElementById(window.modelContainer).appendChild(model3D); |
|
model3D.style.position = \"relative\"; |
|
model3D.style.left = \"0\"; |
|
model3D.style.top = \"0\"; |
|
model3D.style.zIndex = \"initial\"; |
|
document.getElementById(\"compass_box\").style.zIndex = \"initial\"; |
|
}'><b style='color:blue;'>◅</b>𝍠<b style='color:red;'>▻</b></a></div> |
|
</div> |
|
""") |
|
render = gr.Button("Render") |
|
input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords]) |
|
|
|
|
|
def on_submit(uploaded_video,model_type,blur_in,boffset,bsize,coordinates): |
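        """Convert the panorama coordinates into signed offsets around their centroid
        (scaled vincenty distances), then run the depth pipeline on the video."""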
|
global locations |
|
locations = [] |
|
avg = [0, 0] |
|
|
|
locations = json.loads(coordinates) |
|
for k, location in enumerate(locations): |
|
if "tiles" in locations[k]: |
|
locations[k]["heading"] = locations[k]["tiles"]["originHeading"] |
|
locations[k]["pitch"] = locations[k]["tiles"]["originPitch"] |
|
            elif "heading" not in locations[k] or "pitch" not in locations[k]:
|
locations[k]["heading"] = 0.0 |
|
locations[k]["pitch"] = 0.0 |
|
|
|
if "location" in locations[k]: |
|
locations[k] = locations[k]["location"]["latLng"] |
|
            elif "lat" not in locations[k] or "lng" not in locations[k]:
|
locations[k]["lat"] = 0.0 |
|
locations[k]["lng"] = 0.0 |
|
|
|
avg[0] = avg[0] + locations[k]["lat"] |
|
avg[1] = avg[1] + locations[k]["lng"] |
|
|
|
if len(locations) > 0: |
|
avg[0] = avg[0] / len(locations) |
|
avg[1] = avg[1] / len(locations) |
|
|
|
for k, location in enumerate(locations): |
|
lat = vincenty((location["lat"], 0), (avg[0], 0)) * 1000 |
|
lng = vincenty((0, location["lng"]), (0, avg[1])) * 1000 |
|
locations[k]["lat"] = float(lat / 2.5 * 111 * np.sign(location["lat"]-avg[0])) |
|
locations[k]["lng"] = float(lng / 2.5 * 111 * np.sign(location["lng"]-avg[1])) |
|
print(locations) |
|
|
|
|
|
|
|
|
|
        output_video_path = make_video(uploaded_video, encoder=model_type, blur_data=blur_in, o=boffset, b=bsize)
|
|
|
return output_video_path + (json.dumps(locations),) |
|
|
|
submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, depth_video, coords]) |
|
render.click(None, inputs=[coords, mesh_order, output_frame, output_mask, selected, output_depth, output_switch], outputs=None, js=load_model) |
|
render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, mesh_order]) |
|
|
|
example_files = [["./examples/streetview.mp4", "vits", blurin, 1, 32, example_coords]] |
|
examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, depth_video, coords]) |
|
|
|
|
|
if __name__ == '__main__': |
|
demo.queue().launch() |