# app.py — Hugging Face Space by arsath-sm (commit b113f1f)
# Real vs AI-generated face classification demo.
import gradio as gr
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import numpy as np
from huggingface_hub import hf_hub_download
import os
def load_model_from_hub(repo_id, filename):
    """Download *filename* from the Hub repo *repo_id* and load it as a Keras model.

    hf_hub_download caches the file locally and returns its path, which is
    handed straight to tf.keras.models.load_model.
    """
    return tf.keras.models.load_model(
        hf_hub_download(repo_id=repo_id, filename=filename)
    )
# Load both face classifiers from the Hugging Face Hub at import time.
# NOTE(review): this downloads on startup, so the first launch needs network
# access; subsequent runs hit the local hf_hub cache.
model1 = load_model_from_hub("arsath-sm/face_classification_model1", "face_classification_model1.h5")
model2 = load_model_from_hub("arsath-sm/face_classification_model2", "face_classification_model2.h5")
# Preprocess the image
def preprocess_image(img, target_size=(224, 224)):
    """Convert a PIL image into a normalized batch for the classifiers.

    Args:
        img: PIL image (anything exposing ``resize`` that yields image data).
        target_size: (width, height) the models expect; defaults to 224x224,
            generalized from the previously hard-coded size.

    Returns:
        float32 numpy array of shape (1, height, width, channels) with values
        scaled into [0, 1].
    """
    resized = img.resize(target_size)
    arr = np.asarray(resized, dtype=np.float32)
    # PIL grayscale images come back 2-D; keep a trailing channel axis so the
    # batched result is always rank 4 (matches keras img_to_array behavior).
    if arr.ndim == 2:
        arr = arr[..., np.newaxis]
    batch = np.expand_dims(arr, axis=0)
    return batch / 255.0
# Make predictions
def predict(img):
    """Classify *img* with both models and report label + confidence for each.

    Each model is assumed to emit a single sigmoid score in [0, 1], where
    > 0.5 means "Real". Confidence is the score's distance from the losing
    class (score for "Real", 1 - score for "Fake").
    Returns a dict keyed by display name mapping to a formatted string.
    """
    batch = preprocess_image(img)
    outputs = {}
    for display_name, model in (
        ("Model 1 Prediction", model1),
        ("Model 2 Prediction", model2),
    ):
        score = model.predict(batch)[0][0]
        label = "Real" if score > 0.5 else "Fake"
        confidence = score if label == "Real" else 1 - score
        outputs[display_name] = f"{label} (Confidence: {confidence:.2f})"
    return outputs
def _predict_for_display(img):
    """Adapter: unpack predict()'s dict into the two Textbox outputs in order."""
    results = predict(img)
    return results["Model 1 Prediction"], results["Model 2 Prediction"]


# Create the Gradio interface.
# NOTE(review): gr.Interface does not accept a dict for `outputs` — it expects
# a component or list of components. Use labeled Textboxes and return values
# positionally via the adapter above; predict() itself is unchanged.
iface = gr.Interface(
    fn=_predict_for_display,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Textbox(label="Model 1 Prediction"),
        gr.Textbox(label="Model 2 Prediction"),
    ],
    title="Real vs AI Face Classification",
    description="Upload an image to classify whether it's a real face or an AI-generated face using two different models.",
)

# Launch the app
iface.launch()