import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.applications import vgg16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Input, Dense, Flatten
from tensorflow.keras.utils import load_img, img_to_array
from sklearn.metrics import confusion_matrix

print(tf.__version__)  # confirm the TensorFlow version in use


# Transfer learning: use VGG16 (ImageNet weights) as a frozen convolutional base
vgg16_model = vgg16.VGG16(include_top=False, input_shape=(224, 224, 3), weights='imagenet')
vgg16_model.trainable = False

# Stack a new classifier head on top of the frozen base
model = Sequential()
model.add(vgg16_model)
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1, activation='sigmoid'))  # binary output: 0 = men, 1 = women

# Lowercase 'accuracy' so Keras resolves it to binary accuracy for the sigmoid output
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Restore previously trained weights for the classifier head
model.load_weights("cp.ckpt")
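
# The checkpoint "cp.ckpt" is assumed to come from an earlier training run that is
# not part of this script. The function below is a hedged sketch (never called here)
# of how such a checkpoint could be produced; the "dataset" directory name and the
# hyperparameters are illustrative assumptions, not the original training setup.
def train_and_save_checkpoint(data_dir="dataset", epochs=5, batch_size=32):
    # Rescale pixels to [0, 1] (matching the /255 used at inference) and hold out 20% for validation
    datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255,
                                                              validation_split=0.2)
    train_gen = datagen.flow_from_directory(data_dir, target_size=(224, 224),
                                            batch_size=batch_size, class_mode='binary',
                                            subset='training')
    val_gen = datagen.flow_from_directory(data_dir, target_size=(224, 224),
                                          batch_size=batch_size, class_mode='binary',
                                          subset='validation')
    # Save weights only, so model.load_weights("cp.ckpt") can restore them later
    checkpoint = tf.keras.callbacks.ModelCheckpoint("cp.ckpt", save_weights_only=True,
                                                    save_best_only=True)
    model.fit(train_gen, validation_data=val_gen, epochs=epochs, callbacks=[checkpoint])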





import gradio as gr


def classify_gender(input_img_path):
    # Preprocess exactly as during training: resize, scale to [0, 1], add a batch dimension
    img = load_img(input_img_path, target_size=(224, 224))
    img = img_to_array(img)
    img = img / 255
    img = img.reshape(1, 224, 224, 3)
    # Threshold the sigmoid output at 0.5: 0 -> "Men", 1 -> "Women"
    p = (model.predict(img) >= 0.5).astype(int)[0][0]
    if p == 0:
        return "Men"
    else:
        return "Women"


demo = gr.Interface(fn=classify_gender,
                    inputs=gr.Image(type="filepath", height=700, width=600),
                    outputs="text",
                    examples=[["0008.jpg"], ["0059.jpg"], ["0d0408eb-b0f2-454b-b4cf-1bd9d48ab3a5.jpg"],
                              ["20240609_175821.jpg"], ["20240609_180301.jpg"], ["20240609_181603.jpg"],
                              ["20240609_180539.jpg"], ["5ac73492-f665-48a1-a9c9-320bd7dfa1b6.jpg"]])
demo.launch()
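

# confusion_matrix is imported above but never used in the original script. As a
# hedged sketch (never called here), one way to use it is to score the model on a
# labelled Keras directory iterator, such as the hypothetical val_gen defined in
# train_and_save_checkpoint above.
def evaluate_with_confusion_matrix(generator):
    # Collect true labels and thresholded predictions batch by batch
    y_true, y_pred = [], []
    for batch_idx in range(len(generator)):
        images, labels = generator[batch_idx]
        preds = (model.predict(images) >= 0.5).astype(int).ravel()
        y_true.extend(labels.astype(int))
        y_pred.extend(preds)
    cm = confusion_matrix(y_true, y_pred)
    # Visualise the matrix with seaborn, which is already imported above
    sns.heatmap(cm, annot=True, fmt='d',
                xticklabels=['Men', 'Women'], yticklabels=['Men', 'Women'])
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.show()
    return cm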