rupeshs committed 1c32148 · 1 parent: a704e4e

model update

backend/safety_check.py CHANGED
@@ -2,8 +2,8 @@ from transformers import CLIPProcessor, CLIPModel
 from PIL import Image
 
 
-model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
-processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+# model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 
 def is_safe_image(
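
Only the opening of is_safe_image's signature is visible in this hunk. Judging from the new call site below (is_safe_image(model, processor, result)), the helper takes the CLIP model, its processor, and a PIL image and returns a boolean. A minimal sketch of such a CLIP zero-shot check, with assumed label strings and decision rule, could look like:

    # Sketch only: the labels and decision rule are assumptions, not part
    # of this commit; only the (model, processor, image) signature is
    # implied by the call site in hf_demo.py.
    import torch
    from PIL import Image
    from transformers import CLIPProcessor, CLIPModel

    def is_safe_image(model: CLIPModel, processor: CLIPProcessor, image: Image.Image) -> bool:
        labels = ["safe for work", "not safe for work"]  # assumed label set
        inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
        with torch.no_grad():
            outputs = model(**inputs)
        probs = outputs.logits_per_image.softmax(dim=1)  # shape (1, 2)
        return bool(probs[0][0] > probs[0][1])  # safe iff the "safe" label wins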
frontend/webui/hf_demo.py CHANGED
@@ -9,15 +9,19 @@ import base64
 from backend.device import get_device_name
 from constants import APP_VERSION
 from backend.device import is_openvino_device
-import PIL
+from PIL import Image
 from backend.models.lcmdiffusion_setting import DiffusionTask
+from backend.safety_check import is_safe_image
 from pprint import pprint
+from transformers import CLIPProcessor, CLIPModel
 
 lcm_text_to_image = LCMTextToImage()
 lcm_lora = LCMLora(
     base_model_id="Lykon/dreamshaper-7",
     lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
 )
+model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 
 # https://github.com/gradio-app/gradio/issues/2635#issuecomment-1423531319
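
With the module-level loads commented out in backend/safety_check.py, the demo script now owns the CLIP weights and passes them into is_safe_image explicitly, so importing safety_check no longer triggers a model download. An alternative that keeps ownership in safety_check.py without the import-time cost (not what this commit does) would be a cached lazy loader:

    # Hypothetical alternative, not in this commit: load CLIP on first use
    # and cache the pair for all subsequent calls.
    from functools import lru_cache
    from transformers import CLIPProcessor, CLIPModel

    @lru_cache(maxsize=1)
    def get_safety_checker():
        model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
        return model, processor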
@@ -56,7 +60,7 @@ def predict(
     # lcm_diffusion_setting.image_height = 320 if is_openvino_device() else 512
     lcm_diffusion_setting.image_width = 512
     lcm_diffusion_setting.image_height = 512
-    lcm_diffusion_setting.use_openvino = False
+    lcm_diffusion_setting.use_openvino = True
     lcm_diffusion_setting.use_tiny_auto_encoder = False
     pprint(lcm_diffusion_setting.model_dump())
     lcm_text_to_image.init(lcm_diffusion_setting=lcm_diffusion_setting)
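
Flipping use_openvino to True routes generation through the OpenVINO pipeline unconditionally. A more defensive variant (an assumption, not part of this commit) could gate the flag on the is_openvino_device() helper the file already imports:

    # Hypothetical variant: enable OpenVINO only where the host supports it.
    lcm_diffusion_setting.use_openvino = is_openvino_device()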
@@ -64,7 +68,12 @@
     images = lcm_text_to_image.generate(lcm_diffusion_setting)
     latency = perf_counter() - start
     print(f"Latency: {latency:.2f} seconds")
-    return images[0]  # .resize([512, 512], PIL.Image.ANTIALIAS)
+    result = images[0]
+    if is_safe_image(model, processor, result):
+        return result  # .resize([512, 512], PIL.Image.ANTIALIAS)
+    else:
+        print("Unsafe image detected")
+        return Image.new("RGB", (512, 512), (0, 0, 0))
 
 
 css = """
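
With this hunk, predict() never returns an unfiltered image: the CLIP check runs on every generated result, and failures are replaced by a black 512x512 placeholder. A quick way to exercise the gate outside Gradio (the image path is illustrative) might be:

    # Sketch: drive the safety gate directly. "sample.png" is a placeholder
    # path, not a file from this repo.
    from PIL import Image
    from transformers import CLIPProcessor, CLIPModel
    from backend.safety_check import is_safe_image

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    sample = Image.open("sample.png")
    print("safe" if is_safe_image(model, processor, sample) else "blocked")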
 