# notes https://huggingface.co/spaces/Joeythemonster/Text-To-image-AllModels/blob/main/app.py
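
# Gradio demo for probing training-data memorization: generate an MNIST digit
# with a diffusion model, ask a hosted classifier which digit it is, then scan
# the MNIST training set for the closest same-label image (by RMSE) and return
# the generated/real pair for side-by-side comparison.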
import os
import time

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import requests
import tensorflow as tf
import torch
from diffusers import DiffusionPipeline
from gradio_imageslider import ImageSlider
from huggingface_hub import from_pretrained_keras
from image_similarity_measures.evaluate import evaluation
from PIL import Image

# only needed by the commented-out experiments below:
# import spaces
# import gradio.components as grc
# from torch.autograd import Variable
# from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
# os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'
# options = ['Placeholder A', 'Placeholder B', 'Placeholder C']
# pipeline = DiffusionPipeline.from_pretrained("nathanReitinger/MNIST-diffusion-oneImage")
# device = "cuda" if torch.cuda.is_available() else "cpu"
# pipeline = pipeline.to(device=device)
# @spaces.GPU
# def predict(steps, seed):
#     print("HI")
#     generator = torch.manual_seed(seed)
#     for i in range(1, steps):
#         yield pipeline(generator=generator, num_inference_steps=i).images[0]
#
# gr.Interface(
#     predict,
#     inputs=[
#         grc.Slider(0, 1000, label='Inference Steps', value=42, step=1),
#         grc.Slider(0, 2147483647, label='Seed', value=42, step=1),
#     ],
#     outputs=gr.Image(height=28, width=28, type="pil", elem_id="output_image"),
#     css="#output_image{width: 256px !important; height: 256px !important;}",
#     title="Model Problems: Infringing on MNIST!",
#     description="Opening the black box.",
# ).queue().launch()
modellist = [
    'nathanReitinger/MNIST-diffusion-oneImage',
    'nathanReitinger/MNIST-diffusion',
    # 'nathanReitinger/MNIST-GAN',
    # 'nathanReitinger/MNIST-GAN-noDropout',
]
def getModel(model):
    model_id = model

    # MNIST training split; scanned below for the closest real match to the
    # generated image
    (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()

    # unique scratch directory per run, e.g. tester/nathanReitinger-MNIST-diffusion/<timestamp>/
    RANDO = str(time.time())
    file_path = 'tester/' + model_id.replace("/", "-") + "/" + RANDO + '/'
    os.makedirs(file_path)

    train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
    train_images = (train_images - 127.5) / 127.5  # normalize the images to [-1, 1]
    print(model_id)
    image = None
    if 'diffusion' in model_id:
        pipe = DiffusionPipeline.from_pretrained(model_id)
        pipe = pipe.to("cpu")
        image = pipe(generator=torch.manual_seed(42), num_inference_steps=1).images[0]
    else:
        # the GAN checkpoints are disabled in modellist, so this branch is a stub:
        # it loads the keras GAN but still samples from the diffusion model
        pipe = DiffusionPipeline.from_pretrained('nathanReitinger/MNIST-diffusion')
        pipe = pipe.to("cpu")
        test = from_pretrained_keras('nathanReitinger/MNIST-GAN')
        image = pipe(generator=torch.manual_seed(42), num_inference_steps=40).images[0]
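    # note: num_inference_steps is the number of denoising steps, so one step
    # yields a very rough sample while 40 gives a cleaner digit; the fixed
    # torch.manual_seed(42) keeps runs reproducible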
    ########################################### let's save this image for comparison to others
    fig = plt.figure(figsize=(1, 1))
    plt.subplot(1, 1, 1)
    plt.imshow(image, cmap='gray')
    plt.axis('off')
    plt.savefig(file_path + 'generated_image.png')
    plt.close()
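    # both the generated sample and the training candidates below are written to
    # PNG through this same matplotlib path, so the RMSE comparison sees images
    # rendered identically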
    API_URL = "https://api-inference.huggingface.co/models/farleyknight/mnist-digit-classification-2022-09-04"

    # get a prediction on what number this is
    def query(filename):
        with open(filename, "rb") as f:
            data = f.read()
        response = requests.post(API_URL, data=data)
        return response.json()
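    # on success the Inference API typically returns a ranked list of
    # label/score dicts, e.g. [{'label': '3', 'score': 0.98}, ...]; while the
    # model is still loading it returns a dict containing an 'error' key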
    # classify the generated image, retrying while the hosted model warms up
    ret = False
    output = None
    while not ret:
        output = query(file_path + 'generated_image.png')
        if 'error' in output:
            time.sleep(10)
        else:
            ret = True
    print(output)
    low_score_log = ''
    this_label_for_this_image = int(output[0]['label'])
    low_score_log += "this image has been identified as a: " + str(this_label_for_this_image) + "\n" + str(output) + "\n"
    print("===================")

    # linear scan over the training set for the same-label image with the lowest RMSE
    lowest_score = float('inf')
    lowest_image = None
    for i in range(len(train_labels)):
        if train_labels[i] == this_label_for_this_image:
            ###
            # get a real image (of the correct digit)
            ###
            to_check = train_images[i]
            fig = plt.figure(figsize=(1, 1))
            plt.subplot(1, 1, 1)
            plt.imshow(to_check[:, :, 0], cmap='gray')  # matplotlib needs (28, 28), not (28, 28, 1)
            plt.axis('off')
            plt.savefig(file_path + 'real_deal.png')
            plt.close()
            # baseline = evaluation(org_img_path='results/real_deal.png', pred_img_path='results/real_deal.png', metrics=["rmse", "psnr"])

            ###
            # check how close that real training image is to the generated one
            ###
            results = evaluation(org_img_path=file_path + 'real_deal.png',
                                 pred_img_path=file_path + 'generated_image.png',
                                 metrics=["rmse", "psnr"])
            if results['rmse'] < lowest_score:
                lowest_score = results['rmse']
                lowest_image = to_check
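            # rmse is computed by image_similarity_measures over the two PNGs;
            # conceptually it is sqrt(mean((real - generated) ** 2)) per pixel,
            # so lower is closer and 0.0 would be an exact reproduction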
            # image1 = np.array(Image.open(file_path + 'real_deal.png'))
            # image2 = np.array(Image.open(file_path + 'generated_image.png'))
            # img1 = torch.from_numpy(image1).float().unsqueeze(0).unsqueeze(0) / 255.0
            # img2 = torch.from_numpy(image2).float().unsqueeze(0).unsqueeze(0) / 255.0
            # img1 = Variable(img1, requires_grad=False)
            # img2 = Variable(img2, requires_grad=True)
            # ssim_score = ssim(img1, img2).item()
            # # sys.exit()
            # # l2 = distance.euclidean(image1, image2)
            # low_score_log += 'rmse score:' + str(lowest_score) + "\n"
            # low_score_log += 'ssim score:' + str(ssim_score) + "\n"
            # low_score_log += 'found when:' + str(round(((i / len(train_labels)) * 100), 2)) + '%' + "\n"
            # low_score_log += "---------\n"
            # print(lowest_score, ssim_score, str(round(((i / len(train_labels)) * 100), 2)) + '%')
            # fig = plt.figure(figsize=(1, 1))
            # plt.subplot(1, 1, 1)
            # plt.imshow(to_check, cmap='gray')
            # plt.axis('off')
            # plt.savefig(file_path + str(i) + "--" + str(lowest_score) + '---most_close.png')
            # plt.close()

    # f = open(file_path + "score_log.txt", "w+")
    # f.write(low_score_log)
    # f.close()
print("Done!")
############################################ return image that you just generated
return [image, lowest_image]
# ImageSlider (imported above) takes a (before, after) pair, letting users swipe
# between the generated digit and its closest training-set neighbor
interface = gr.Interface(
    fn=getModel,
    inputs=[gr.Dropdown(modellist, label='model')],
    outputs=ImageSlider(label='generated vs. closest training image'),
    css="#output_image{width: 256px !important; height: 256px !important;}",
    title='Model Problems (infringement)',
)
interface.launch()
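# launch() serves the demo locally on http://127.0.0.1:7860 by default; on a
# Hugging Face Space the same call is picked up automatically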