import os

import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image

import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from datasets import load_dataset

from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator


# Load the trained model

model = load_model('model.h5')
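# NOTE: assumes 'model.h5' sits next to this script and takes 64x64 RGB inputs.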
img_size = (64, 64)
# Define a function to preprocess the input image
def preprocess_input_image(img_path):
    img = image.load_img(img_path, target_size=img_size)
    img1 = image.load_img(img_path)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x /= 255.
    return x, img1
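# Note: run() below preprocesses uploads with PIL directly, so this Keras helper
# is not actually called anywhere in this file.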


# Hugging Face dataset identifier (also reused below as a local folder prefix)
dataset_path = 'andrewsunanda/fast_food_image_classification'

# Load the dataset from the Hugging Face Hub
dataset = load_dataset(dataset_path)
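# The class names could also be read directly from the dataset's label feature,
# avoiding any dependence on a local folder layout (assuming 'label' is a ClassLabel):
# class_names = dataset['train'].features['label'].names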

# Define the batch size and image size
batch_size = 256
img_size = (64, 64)

# Paths to the train, validation, and test folders.
# NOTE: flow_from_directory below needs real local directories, so this assumes the
# dataset images have been exported to '<dataset_path>/Train', '/Valid' and '/Test'.
train_path = os.path.join(dataset_path, 'Train')
valid_path = os.path.join(dataset_path, 'Valid')
test_path = os.path.join(dataset_path, 'Test')

# Define the transforms for the dataset
transform = transforms.Compose([
    transforms.Resize(img_size),
    transforms.ToTensor(),
])
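# (Resize yields 64x64 images; ToTensor converts them to float tensors scaled to [0, 1].)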

# Apply the image transform lazily and wrap each split in a PyTorch DataLoader
def apply_transform(batch):
    batch['image'] = [transform(img) for img in batch['image']]
    return batch

train_dataset = dataset['train'].with_transform(apply_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

valid_dataset = dataset['validation'].with_transform(apply_transform)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)

test_dataset = dataset['test'].with_transform(apply_transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
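# The DataLoaders above are not referenced again in this file; they are presumably
# consumed by other pages of the app.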

# Create data generators for training, validation, and testing
train_datagen = ImageDataGenerator(
    rescale=1./255, 
    horizontal_flip=True
)

valid_datagen = ImageDataGenerator(
    rescale=1./255
)
test_datagen = ImageDataGenerator(
    rescale=1./255
)

train_generator = train_datagen.flow_from_directory(
    train_path,
    target_size=img_size,
    batch_size=batch_size,
    class_mode='categorical'
)

valid_generator = valid_datagen.flow_from_directory(
    valid_path, 
    target_size=img_size, 
    batch_size=batch_size, 
    class_mode='categorical'
)

test_generator = test_datagen.flow_from_directory(
    test_path,
    target_size=img_size,
    batch_size=batch_size,
    class_mode='categorical'
)

class_names = list(train_generator.class_indices.keys())
train_classes = pd.Series(train_generator.classes)
test_classes = pd.Series(test_generator.classes)
valid_classes = pd.Series(valid_generator.classes)
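# Only class_names is used by run() below; the *_classes Series are presumably kept for EDA pages.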

def run():

    st.title('Fast Food Image Prediction')


    with st.form(key='form_food'):
        uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
        
        submitted = st.form_submit_button('Predict')

    if submitted:
        if uploaded_file is not None:
            # Preprocess the uploaded image: force RGB, resize, scale to [0, 1]
            img = Image.open(uploaded_file).convert('RGB')
            x = np.array(img.resize(img_size)) / 255.
            x = np.expand_dims(x, axis=0)

            # Make a prediction on the input image
            preds = model.predict(x, verbose=0)
            pred_class = np.argmax(preds)
            pred_class_name = class_names[pred_class]

            # Display the input image and the predicted class
            st.image(img, caption=f"Predicted Class: {pred_class_name}", use_column_width=True)
        else:
            st.warning('Please upload an image before clicking Predict.')
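    # Optionally, the model's confidence could be shown as well, e.g.
    # st.write(f"Confidence: {preds[0][pred_class]:.2%}") inside the branch above.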



if __name__ == '__main__':
    run()