freealise committed
Commit c269360
1 Parent(s): c4f662c

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -35,6 +35,7 @@ encoder2name = {
     'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint
 }
 
+blurin = "1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1"
 edge = []
 gradient = None
 params = { "fnum":0, "l":16 }
@@ -74,7 +75,7 @@ def predict_depth(image, model):
 #def predict_depth(model, image):
 # return model(image)["depth"]
 
-def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data=blur_in):
+def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data=blurin):
     if encoder not in ["vitl","vitb","vits","vitg"]:
         encoder = "vits"
 
@@ -1036,7 +1037,7 @@ with gr.Blocks(css=css, js=js) as demo:
                 this.parentNode.childNodes[2].innerText = this.value;
             ' onchange='this.click();'/><span>1</span>""")
         with gr.Accordion(label="Blur levels", open=False):
-            blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value="1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1")
+            blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=blurin)
         with gr.Accordion(label="Locations", open=False):
             selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
         output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected, bgcolor])
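In short, the commit hoists the kernel-size defaults into a module-level blurin string defined before make_video, so the function's blur_data default no longer names the blur_in Textbox, which is only created much later inside the gr.Blocks context; since Python evaluates default arguments when the def statement runs, the old default would have failed with a NameError at import. Below is a minimal sketch of the pattern, assuming the literal holds 256 entries (one kernel size per frame, matching the frame selector's maximum=256); parse_kernel_sizes is a hypothetical helper for illustration, not part of app.py:

# Sketch only: blurin is rebuilt programmatically here, while app.py
# hard-codes the literal; 256 entries is an assumption from the frame cap.
blurin = " ".join(["1"] * 256)  # one kernel size per frame; 1 = no blur

def parse_kernel_sizes(blur_data: str = blurin) -> list[int]:
    # Hypothetical helper: split the space-separated string into per-frame
    # ints. OpenCV-style blurs expect odd kernel sizes, so "1" leaves a
    # frame unblurred while "3", "5", ... blur it progressively more.
    return [int(k) for k in blur_data.split()]

assert len(parse_kernel_sizes()) == 256

Defining the default as a plain string also keeps make_video usable outside the UI, since callers can pass any space-separated kernel-size list without touching a Gradio component.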