Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -59,7 +59,7 @@ model_id_or_path = "CompVis/stable-diffusion-v1-4"
|
|
59 |
pipe = StableDiffusionInpaintingPipeline.from_pretrained(
|
60 |
model_id_or_path,
|
61 |
revision="fp16",
|
62 |
-
torch_dtype=torch.float16,
|
63 |
use_auth_token=auth_token
|
64 |
)
|
65 |
#self.register_buffer('n_', ...)
|
@@ -68,7 +68,7 @@ print ("torch.backends.mps.is_available: ", torch.backends.mps.is_available())
|
|
68 |
pipe = pipe.to(device)
|
69 |
|
70 |
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
|
71 |
-
model.eval()
|
72 |
model.load_state_dict(torch.load('./clipseg/weights/rd64-uni.pth', map_location=torch.device(device)), strict=False) #False
|
73 |
|
74 |
imgRes = 256
|
|
|
59 |
pipe = StableDiffusionInpaintingPipeline.from_pretrained(
|
60 |
model_id_or_path,
|
61 |
revision="fp16",
|
62 |
+
torch_dtype=torch.float64, #float16
|
63 |
use_auth_token=auth_token
|
64 |
)
|
65 |
#self.register_buffer('n_', ...)
|
|
|
68 |
pipe = pipe.to(device)
|
69 |
|
70 |
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
|
71 |
+
model.eval() #.half()
|
72 |
model.load_state_dict(torch.load('./clipseg/weights/rd64-uni.pth', map_location=torch.device(device)), strict=False) #False
|
73 |
|
74 |
imgRes = 256
|