rupeshs committed
Commit 4b51ee4 · 1 Parent(s): b4064cc

Add safety_check

backend/safety_check.py ADDED
@@ -0,0 +1,30 @@
+ from transformers import CLIPProcessor, CLIPModel
+ from PIL import Image
+
+
+ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+
+ def is_safe_image(
+     model,
+     processor,
+     image,
+ ):
+     # Load image
+     # image = Image.open(
+     #     r"F:\om\2025\fastsdcpumcp\fastsdcpu\results\829a2123-92c8-4957-ad2f-06365a19665a-1.png"
+     # )
+     categories = ["safe", "nsfw"]
+     inputs = processor(
+         text=categories,
+         images=image,
+         return_tensors="pt",
+         padding=True,
+     )
+     outputs = model(**inputs)
+     logits_per_image = outputs.logits_per_image
+     probs = logits_per_image.softmax(dim=1)
+     safe_prob = dict(zip(categories, probs[0].tolist()))
+     print(safe_prob)
+     return safe_prob["safe"] > safe_prob["nsfw"]
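
For reference, a minimal usage sketch of the new helper (not part of the commit); the image path is a placeholder, and the module-level model and processor defined in backend/safety_check.py are reused:

    from PIL import Image
    from backend.safety_check import is_safe_image, model, processor

    # Placeholder path; any RGB image opened with PIL works here.
    image = Image.open("results/example.png")

    # True when the CLIP zero-shot probability for "safe" exceeds "nsfw".
    print(is_safe_image(model, processor, image))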
frontend/webui/hf_demo.py CHANGED
@@ -42,7 +42,7 @@ def predict(
      print(f"prompt - {prompt}")
      lcm_diffusion_setting = LCMDiffusionSetting()
      lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
-     lcm_diffusion_setting.openvino_lcm_model_id = "rupeshs/sd-turbo-openvino"
+     lcm_diffusion_setting.openvino_lcm_model_id = "rupeshs/hyper-sd-sdxl-1-step"
      lcm_diffusion_setting.use_lcm_lora = False
      lcm_diffusion_setting.prompt = prompt
      lcm_diffusion_setting.guidance_scale = 1.0
@@ -55,15 +55,15 @@
      # lcm_diffusion_setting.image_height = 320 if is_openvino_device() else 512
      lcm_diffusion_setting.image_width = 512
      lcm_diffusion_setting.image_height = 512
-     lcm_diffusion_setting.use_openvino = True
-     lcm_diffusion_setting.use_tiny_auto_encoder = True
+     lcm_diffusion_setting.use_openvino = False
+     lcm_diffusion_setting.use_tiny_auto_encoder = False
      pprint(lcm_diffusion_setting.model_dump())
      lcm_text_to_image.init(lcm_diffusion_setting=lcm_diffusion_setting)
      start = perf_counter()
      images = lcm_text_to_image.generate(lcm_diffusion_setting)
      latency = perf_counter() - start
      print(f"Latency: {latency:.2f} seconds")
-     return images[0]  # .resize([512, 512], PIL.Image.ANTIALIAS)
+     return images[0]


  css = """