Update app.py
app.py CHANGED
@@ -17,6 +17,8 @@ from clipseg.models.clipseg import CLIPDensePredT
 
 #auth_token = os.environ.get("API_TOKEN") or True
 
+def dummy_checker(images, **kwargs): return images, False
+
 def download_image(url):
     response = requests.get(url)
     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
@@ -29,6 +31,8 @@ pipe = DiffusionPipeline.from_pretrained(
     use_auth_token="",
 ).to(device)
 
+pipe.safety_checker = dummy_checker
+
 model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)
 model.eval()
 model.load_state_dict(torch.load('./clipseg/weights/rd64-uni.pth', map_location=torch.device('cpu')), strict=False)
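For context, the only functional change in this commit is to bypass the diffusers safety checker by pointing pipe.safety_checker at a no-op callable. Below is a minimal standalone sketch of the same pattern; the checkpoint id and device selection are illustrative assumptions and are not taken from this Space's code, whose from_pretrained() arguments are not shown in the hunk above.

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed placeholder checkpoint for illustration only; the Space loads its own model.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting"
).to(device)

# No-op safety checker: return the images untouched and report that nothing was flagged.
# Depending on the diffusers version, the second return value may be expected to be a
# list of booleans (one per image) rather than a single False.
def dummy_checker(images, **kwargs):
    return images, False

pipe.safety_checker = dummy_checker

The reason this disables filtering is that the stock StableDiffusionSafetyChecker blacks out flagged images before returning them; a replacement that hands its input back unchanged therefore leaves every generated image intact.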