asahi417 committed
Commit 3db013e · verified · 1 Parent(s): d1e5ac3

Upload folder using huggingface_hub

Files changed (1):
  1. app.py (+27, -8)
app.py CHANGED
@@ -1,10 +1,13 @@
+import torch
 import gradio as gr
 from diffusers.utils import load_image
 import spaces
-from panna.pipeline import PipelineDepth2ImageV2
+# from panna.pipeline import PipelineDepth2ImageV2
+from panna import Depth2Image, DepthAnythingV2
 
-
-model = PipelineDepth2ImageV2()
+# model = PipelineDepth2ImageV2()
+model_depth = DepthAnythingV2("depth-anything/Depth-Anything-V2-Large-hf", torch_dtype=torch.float32)
+model_image = Depth2Image("stabilityai/stable-diffusion-2-depth")
 title = ("# [Depth2Image](https://huggingface.co/stabilityai/stable-diffusion-2-depth) with [DepthAnythingV2](https://huggingface.co/depth-anything/Depth-Anything-V2-Large-hf)\n"
          "Depth2Image with depth map predicted by DepthAnything V2. The demo is part of [panna](https://github.com/abacws-abacus/panna) project.")
 example_files = []
@@ -15,16 +18,32 @@ for n in range(1, 10):
 
 @spaces.GPU
 def infer(init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
-    return model(
-        init_image,
-        prompt=prompt,
-        negative_prompt=negative_prompt,
+    depth = model_depth.image2depth([init_image], return_tensor=True)
+    return model_image.text2image(
+        [init_image],
+        depth_maps=depth,
+        prompt=[prompt],
+        negative_prompt=[negative_prompt],
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         height=height,
         width=width,
         seed=seed
-    )
+    )[0]
+
+
+# @spaces.GPU
+# def infer(init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
+#     return model(
+#         init_image,
+#         prompt=prompt,
+#         negative_prompt=negative_prompt,
+#         guidance_scale=guidance_scale,
+#         num_inference_steps=num_inference_steps,
+#         height=height,
+#         width=width,
+#         seed=seed
+#     )
 
 
 with gr.Blocks() as demo:
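
For reference, a minimal standalone sketch of the refactored two-stage flow (depth prediction, then depth-conditioned generation), assuming the `panna` API exactly as it appears in the diff (`DepthAnythingV2.image2depth`, `Depth2Image.text2image`); the input path, prompts, and sampling values below are placeholders, not part of the commit:

```python
# Sketch only: assumes the panna API as used in the diff; the input image,
# prompts, and sampling settings are placeholders.
import torch
from diffusers.utils import load_image
from panna import Depth2Image, DepthAnythingV2

model_depth = DepthAnythingV2("depth-anything/Depth-Anything-V2-Large-hf", torch_dtype=torch.float32)
model_image = Depth2Image("stabilityai/stable-diffusion-2-depth")

init_image = load_image("sample.png")  # placeholder input image (path or URL)
# Stage 1: predict a depth map with DepthAnything V2.
depth = model_depth.image2depth([init_image], return_tensor=True)
# Stage 2: condition Stable Diffusion 2 Depth on that depth map.
images = model_image.text2image(
    [init_image],
    depth_maps=depth,
    prompt=["a fantasy landscape"],   # placeholder prompt
    negative_prompt=["low quality"],  # placeholder negative prompt
    guidance_scale=7.5,
    num_inference_steps=50,
    height=512,
    width=512,
    seed=42,
)
images[0].save("output.png")
```

Splitting `PipelineDepth2ImageV2` into an explicit `DepthAnythingV2` + `Depth2Image` pair exposes the intermediate depth map, which is why `infer` now passes `depth_maps`, wraps the image and prompts in lists, and unwraps the batched result with `[0]`.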