Duplicate from suko/nsfw
Co-authored-by: SuKo Kuo <[email protected]>
- .gitattributes +34 -0
- README.md +14 -0
- app.py +105 -0
- labels.txt +2 -0
- model.onnx +3 -0
- requirements.txt +4 -0
- signature.json +1 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Nsfw Space
+emoji: 💩
+colorFrom: green
+colorTo: green
+sdk: gradio
+sdk_version: 3.21.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+duplicated_from: suko/nsfw
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,105 @@
+import gradio as gr
+import numpy as np
+import json
+import os
+from PIL import Image
+import onnxruntime as rt
+class ONNXModel:
+    def __init__(self, dir_path) -> None:
+        """Get the model file name from signature.json; assumes the signature sits next to the model file."""
+        model_dir = os.path.dirname(dir_path)
+        with open(os.path.join(model_dir, "signature.json"), "r") as f:
+            self.signature = json.load(f)
+        self.model_file = os.path.join(model_dir, self.signature.get("filename"))
+        if not os.path.isfile(self.model_file):
+            raise FileNotFoundError("Model file does not exist")
+        # get the signature for model inputs and outputs
+        self.signature_inputs = self.signature.get("inputs")
+        self.signature_outputs = self.signature.get("outputs")
+        self.session = None
+        if "Image" not in self.signature_inputs:
+            raise ValueError("ONNX model doesn't have 'Image' input! Check signature.json, and please report issue to Lobe.")
+        # Look for the version in the signature file.
+        # If it's not found or doesn't match the expected version, print a message.
+        version = self.signature.get("export_model_version")
+        if version is None or version != EXPORT_MODEL_VERSION:
+            print(
+                f"There has been a change to the model format. Please use a model with a signature 'export_model_version' that matches {EXPORT_MODEL_VERSION}."
+            )
+
+    def load(self) -> None:
+        """Load the model from the path to the model file."""
+        # Load the ONNX model as an inference session.
+        self.session = rt.InferenceSession(path_or_bytes=self.model_file)
+
+    def predict(self, image: Image.Image) -> dict:
+        """
+        Predict with the ONNX session!
+        """
+        # process the image to be compatible with the model
+        img = self.process_image(image, self.signature_inputs.get("Image").get("shape"))
+        # run the model!
+        fetches = [(key, value.get("name")) for key, value in self.signature_outputs.items()]
+        # make the image a batch of 1
+        feed = {self.signature_inputs.get("Image").get("name"): [img]}
+        outputs = self.session.run(output_names=[name for (_, name) in fetches], input_feed=feed)
+        return self.process_output(fetches, outputs)
+
+    def process_image(self, image: Image.Image, input_shape: list) -> np.ndarray:
+        """
+        Given a PIL Image, center square crop and resize to fit the expected model input, and convert from [0, 255] to [0, 1] values.
+        """
+        width, height = image.size
+        # ensure the image mode is compatible with the model and convert if not
+        if image.mode != "RGB":
+            image = image.convert("RGB")
+        # center crop the image (you can substitute any other method to make a square image, such as just resizing or padding the edges with 0)
+        if width != height:
+            square_size = min(width, height)
+            left = (width - square_size) / 2
+            top = (height - square_size) / 2
+            right = (width + square_size) / 2
+            bottom = (height + square_size) / 2
+            # crop the center of the image
+            image = image.crop((left, top, right, bottom))
+        # now that the image is square, resize it to the right shape for the model input
+        input_width, input_height = input_shape[1:3]
+        if image.width != input_width or image.height != input_height:
+            image = image.resize((input_width, input_height))
+
+        # make 0-1 float instead of 0-255 int (which PIL loads by default)
+        image = np.asarray(image) / 255.0
+        # format the input as the model expects
+        return image.astype(np.float32)
+
+    def process_output(self, fetches: list, outputs: list) -> dict:
+        # un-batch since we ran an image with a batch size of 1,
+        # convert to normal python types with tolist(), and convert any byte strings to normal strings with .decode()
+        out_keys = ["label", "confidence"]
+        results = {}
+        for i, (key, _) in enumerate(fetches):
+            val = outputs[i].tolist()[0]
+            if isinstance(val, bytes):
+                val = val.decode()
+            results[key] = val
+        confs = results["Confidences"]
+        labels = self.signature.get("classes").get("Label")
+        output = [dict(zip(out_keys, group)) for group in zip(labels, confs)]
+        sorted_output = {"predictions": sorted(output, key=lambda k: k["confidence"], reverse=True)}
+        return sorted_output
+EXPORT_MODEL_VERSION = 1
+model = ONNXModel(dir_path="model.onnx")
+model.load()
+
+def predict(image):
+    # gr.inputs.Image(type="pil") already delivers a PIL Image, so no conversion is needed
+    prediction = model.predict(image)
+    for output in prediction["predictions"]:
+        output["confidence"] = round(output["confidence"], 4)
+    return prediction
+
+inputs = gr.inputs.Image(type="pil")
+outputs = gr.outputs.JSON()
+
+runtime = gr.Interface(title="Naked Detector", fn=predict, inputs=inputs, outputs=outputs)
+runtime.launch()
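For readers who want to call the classifier without the Gradio UI, here is a minimal sketch (an editorial example, not part of this commit). It reuses the ONNXModel class from app.py above and assumes model.onnx and signature.json are in the working directory; "test.jpg" is a hypothetical sample image.

# Minimal direct-inference sketch (editorial example, not part of the commit).
# Reuses the ONNXModel class from app.py above; "test.jpg" is hypothetical.
from PIL import Image

nsfw_model = ONNXModel(dir_path="model.onnx")
nsfw_model.load()

result = nsfw_model.predict(Image.open("test.jpg"))
# result looks like:
# {"predictions": [{"label": "Safe", "confidence": ...},
#                  {"label": "Naked", "confidence": ...}]}
print(result["predictions"][0])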
labels.txt
ADDED
@@ -0,0 +1,2 @@
+Naked
+Safe
model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c68efed73e1f740c1e0847d51a37960c06c880e84acc710bb4b65ce9db6857a1
+size 99511952
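Note that the three lines above are a Git LFS pointer, not the network weights themselves: Git LFS replaces tracked files (per the .gitattributes rules above) with this stub and fetches the real ~95 MB ONNX file separately. As a hedged sketch, a leftover pointer can be told apart from downloaded weights by its header; the path below is just this repo's example.

# Sketch: detect whether a file on disk is still a Git LFS pointer
# (formatted as shown above) rather than the downloaded model weights.
LFS_HEADER = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(len(LFS_HEADER)) == LFS_HEADER

print(is_lfs_pointer("model.onnx"))  # True if LFS has not materialized the file yet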
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+gradio
+numpy
+pillow
+onnxruntime
signature.json
ADDED
@@ -0,0 +1 @@
+{"doc_id": "6bfa3ac3-7170-4519-9a3d-777096d3423b", "doc_name": "NSFW", "doc_version": "c02bcb9a6cb4348ce6a87bed14754843", "format": "onnx", "version": 433, "inputs": {"Image": {"dtype": "float32", "shape": [null, 224, 224, 3], "name": "Image:0"}}, "outputs": {"Confidences": {"dtype": "float32", "shape": [null, 2], "name": "sequential/dense_2/Softmax:0"}}, "tags": [], "classes": {"Label": ["Naked", "Safe"]}, "filename": "model.onnx", "export_model_version": 1}
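signature.json is the contract app.py relies on: the input tensor "Image:0" takes a float32 batch of 224x224 RGB images, and the output "sequential/dense_2/Softmax:0" returns two confidences ordered as ["Naked", "Safe"]. A short sketch, assuming onnxruntime is installed and both files are local, can verify that the signature agrees with the graph inside model.onnx:

# Sketch: sanity-check that signature.json matches the ONNX graph.
# Assumes model.onnx and signature.json are in the working directory.
import json
import onnxruntime as rt

with open("signature.json") as f:
    sig = json.load(f)

session = rt.InferenceSession("model.onnx")
graph_inputs = {i.name for i in session.get_inputs()}
graph_outputs = {o.name for o in session.get_outputs()}

assert sig["inputs"]["Image"]["name"] in graph_inputs          # expects "Image:0"
assert sig["outputs"]["Confidences"]["name"] in graph_outputs  # expects "sequential/dense_2/Softmax:0"
print("signature.json matches the graph in model.onnx")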