Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ import cv2
 from matplotlib import pyplot as plt
 from inpainting import StableDiffusionInpaintingPipeline
 from torchvision import transforms
-
+from clipseg.models.clipseg import CLIPDensePredT
 
 #from huggingface_hub import hf_hub_download
 #hf_hub_download(repo_id="ThereforeGames/txt2mask", filename="/repositories/clipseg/")
@@ -22,7 +22,7 @@ from torchvision import transforms
 
 #from huggingface_hub import Repository
 #with Repository(local_dir="clipseg", clone_from="ThereforeGames/txt2mask/repositories/clipseg/")
-
+"""
 import sys
 import os
 
@@ -31,7 +31,7 @@ zf = ZipFile('clipseg-master.zip', 'r')
 zf.extractall('./clipseg')
 zf.close()
 
-
+
 from huggingface_hub import HfApi
 api = HfApi()
 api.upload_folder(
@@ -63,7 +63,7 @@ pipe = StableDiffusionInpaintingPipeline.from_pretrained(
 
 model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)
 model.eval()
-model.load_state_dict(torch.load('./clipseg/weights/rd64-uni.pth', map_location=torch.device('cuda')), strict=False)
+model.load_state_dict(torch.load('./clipseg-master/weights/rd64-uni.pth', map_location=torch.device('cuda')), strict=False)
 
 transform = transforms.Compose([
     transforms.ToTensor(),
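For orientation, here is a minimal sketch of the text-to-mask path these lines set up, assuming the clipseg-master checkout and rd64-uni.pth weights named in the diff. The input file, prompt, and 0.35 threshold are hypothetical, and the CPU fallback for map_location is a common guard rather than part of this commit:

import torch
from PIL import Image
from torchvision import transforms
from clipseg.models.clipseg import CLIPDensePredT

# Build CLIPSeg in eval mode; strict=False because the rd64-uni.pth
# checkpoint does not include the frozen CLIP backbone weights.
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)
model.eval()

# Hypothetical guard: fall back to CPU so torch.load does not fail on
# hardware without a GPU (map_location=torch.device('cuda') needs one).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(
    torch.load('./clipseg-master/weights/rd64-uni.pth', map_location=device),
    strict=False,
)

# CLIPSeg expects a normalized, fixed-size input tensor.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
    transforms.Resize((352, 352)),
])

image = Image.open('input.png').convert('RGB')  # hypothetical input
prompt = 'a glass of wine'                      # hypothetical mask prompt

with torch.no_grad():
    preds = model(transform(image).unsqueeze(0), [prompt])[0]

# Threshold the logits into a binary mask for the inpainting pipeline.
mask = torch.sigmoid(preds[0][0]) > 0.35
Image.fromarray((mask.cpu().numpy() * 255).astype('uint8')).save('mask.png')

Note that the diff's transform block continues past transforms.ToTensor(), so the normalization and resize above are assumptions taken from the standard clipseg example, not necessarily the Space's exact values.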