LPX committed · Commit d307493 · Parent(s): f98ae1c
🏗️ build(utils): update image preprocess and functionality expansion
- move ELA functionality to a separate utils module for better modularity and maintenance【config】
- update ELA for better performance【install the PyTorch, PIL, and OpenCV libraries】

🔧 chore(doc): update py files
- remove ONNX dependencies【remove onnx/pytorch dependencies】
- update the ignore-file settings so CI automatically ignores build files
- .gitignore +3 -1
- app.py +3 -2
- utils/ela.py +21 -0
- utils/minmax.py +1 -1
- utils/onnx.py +0 -34
- utils/utils.py +0 -15
.gitignore CHANGED
@@ -1,3 +1,5 @@
 .env
 *goat.py
-.vscode
+.vscode
+*onnx.py
+./models/*
app.py CHANGED
@@ -10,9 +10,10 @@ from PIL import Image
 import numpy as np
 import io
 import logging
-from utils.utils import softmax, augment_image, convert_pil_to_bytes
+from utils.utils import softmax, augment_image, convert_pil_to_bytes
 from utils.gradient import gradient_processing
 from utils.minmax import preprocess as minmax_preprocess
+from utils.ela import getELA as ELA
 
 
 # Configure logging
@@ -212,7 +213,7 @@ def generate_results_html(results):
 <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="3" stroke="currentColor" class="w-4 h-4 mr-2 -ml-3 group-hover:animate group-hover:animate-pulse">
 {'<path stroke-linecap="round" stroke-linejoin="round" d="M9 12.75 11.25 15 15 9.75M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Z" />' if label == 'REAL' else '<path stroke-linecap="round" stroke-linejoin="round" d="m9.75 9.75 4.5 4.5m0-4.5-4.5 4.5M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Z" />'}
 </svg>
-<p class="whitespace-nowrap text-lg leading-normal font-bold text-center self-center align-middle py-px">{label}</p>
+<p class="text-base whitespace-nowrap lg:text-lg leading-normal font-bold text-center self-center align-middle py-px">{label}</p>
 </span>
 </div>
 <div>
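For reference, a minimal sketch of calling the relocated ELA helper from app.py. Note that the import above references getELA while the new module defines genELA, so one of the two names would need to change; the sketch imports the name the module actually defines, and the file names are placeholders:

    from PIL import Image
    from utils.ela import genELA as ELA  # utils/ela.py defines genELA, not getELA

    img = Image.open("sample.jpg").convert("RGB")  # placeholder input
    ela_overlay = ELA(img, scale=77, alpha=0.66)   # defaults from utils/ela.py
    ela_overlay.save("sample_ela.png")             # placeholder output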
utils/ela.py ADDED
@@ -0,0 +1,21 @@
+import numpy as np
+import io
+from PIL import Image, ImageFilter, ImageChops
+from torchvision import transforms
+
+def genELA(img_pil, scale=77, alpha=0.66):
+    # Error Level Analysis for basic image forensics
+    original = img_pil.copy()  # copy of the input image
+    temp_path = 'temp.jpg'  # temporary file to re-save the image to
+    original.save(temp_path, quality=95)  # re-save the image at 95% JPEG quality
+    temporary = Image.open(temp_path)  # open the re-saved image
+
+    diff = ImageChops.difference(original, temporary)  # pixel-by-pixel difference between original and re-saved image
+    d = diff.load()  # pixel-access object for in-place edits
+    WIDTH, HEIGHT = diff.size  # image dimensions as a tuple
+    for x in range(WIDTH):  # row by row
+        for y in range(HEIGHT):  # column by column
+            d[x, y] = tuple(k * scale for k in d[x, y])  # amplify each channel's error by the scale factor
+
+    new_img = ImageChops.blend(temporary, diff, alpha)  # blend the re-saved image with the amplified ELA at the given alpha
+    return new_img
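The commit message promises better ELA performance, but the per-pixel Python loop above remains the main cost, and the module imports numpy and io without using them yet. A hypothetical vectorized variant along those lines (genELA_np is an assumed name, not part of this commit) could look like:

    import io
    import numpy as np
    from PIL import Image, ImageChops

    def genELA_np(img_pil, scale=77, alpha=0.66):
        # Hypothetical vectorized ELA: recompress in memory instead of via temp.jpg
        original = img_pil.convert("RGB")
        buf = io.BytesIO()
        original.save(buf, "JPEG", quality=95)
        buf.seek(0)
        temporary = Image.open(buf).convert("RGB")

        # Amplify the recompression error for the whole image in one step
        diff = np.asarray(ImageChops.difference(original, temporary), dtype=np.int32)
        diff = np.clip(diff * scale, 0, 255).astype(np.uint8)

        return ImageChops.blend(temporary, Image.fromarray(diff), alpha)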
utils/minmax.py CHANGED
@@ -27,7 +27,7 @@ def blk_filter(img, radius):
     )
     return cv.normalize(result, None, 0, 127, cv.NORM_MINMAX, cv.CV_8UC1)
 
-def preprocess(image, channel=4, radius=
+def preprocess(image, channel=4, radius=2):
     if not isinstance(image, np.ndarray):
         image = np.array(image)  # Ensure image is a NumPy array
     if channel == 0:
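Only part of preprocess is visible in this hunk, but under the signature shown a minimal call sketch (the input file name is a placeholder) would be:

    import numpy as np
    from PIL import Image
    from utils.minmax import preprocess as minmax_preprocess

    img = Image.open("photo.png").convert("RGB")                 # placeholder input
    out = minmax_preprocess(np.array(img), channel=4, radius=2)  # defaults made explicit

Since the visible body converts non-ndarray inputs itself, passing the PIL image directly should also work.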
utils/onnx.py DELETED
@@ -1,34 +0,0 @@
-import torch
-from transformers import AutoImageProcessor, Swinv2ForImageClassification
-import onnx
-import onnxruntime as ort
-
-# Load the model and processor
-image_processor = AutoImageProcessor.from_pretrained("haywoodsloan/ai-image-detector-deploy")
-model = Swinv2ForImageClassification.from_pretrained("haywoodsloan/ai-image-detector-deploy")
-
-# Set the model to evaluation mode
-model.eval()
-
-# Create dummy input for tracing
-dummy_input = torch.randn(1, 3, 256, 256)  # Batch size of 1, 3 color channels, 256x256 image
-
-# Export the model to ONNX
-onnx_model_path = "model.onnx"
-# torch.onnx.export(
-#     model,
-#     dummy_input,
-#     onnx_model_path,
-#     input_names=["pixel_values"],
-#     output_names=["logits"],
-#     opset_version=11,
-#     dynamic_axes={
-#         "pixel_values": {0: "batch_size"},
-#         "logits": {0: "batch_size"}
-#     }
-# )
-
-# Verify the ONNX model
-# onnx_model = onnx.load(onnx_model_path)
-# onnx.checker.check_model(onnx_model)
-print("The ONNX model is valid.")
utils/utils.py CHANGED
@@ -24,19 +24,4 @@ def convert_pil_to_bytes(image, format='JPEG'):
     img_byte_arr = img_byte_arr.getvalue()
     return img_byte_arr
 
-def ELA(img_pil, scale=77, alpha=0.66):
-    # Error Level Analysis for basic image forensics
-    original = img_pil.copy()  # copy of the input image
-    temp_path = 'temp.jpg'  # temporary file to re-save the image to
-    original.save(temp_path, quality=95)  # re-save the image at 95% JPEG quality
-    temporary = Image.open(temp_path)  # open the re-saved image
 
-    diff = ImageChops.difference(original, temporary)  # pixel-by-pixel difference between original and re-saved image
-    d = diff.load()  # pixel-access object for in-place edits
-    WIDTH, HEIGHT = diff.size  # image dimensions as a tuple
-    for x in range(WIDTH):  # row by row
-        for y in range(HEIGHT):  # column by column
-            d[x, y] = tuple(k * scale for k in d[x, y])  # amplify each channel's error by the scale factor
-
-    new_img = ImageChops.blend(temporary, diff, alpha)  # blend the re-saved image with the amplified ELA at the given alpha
-    return new_img