import warnings

import gradio as gr
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from PIL import Image
from torchvision import models, transforms

warnings.filterwarnings("ignore", category=UserWarning)

class_names = [
    'apple_pie',
    'bibimbap',
    'cannoli',
    'edamame',
    'falafel',
    'french_toast',
    'ramen',
    'sushi',
    'tiramisu',
]
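# NOTE: this ordering is assumed to match the class indices used when the
# classifier was trained; reordering the list would mislabel predictions.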

def pil_loader(path):
    # Helper for loading an image from a file path (not used by the Gradio flow below)
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')

def predict(img):
    # Load and preprocess the image: Gradio passes the upload in as a NumPy array
    img_array = img.astype(np.uint8)

    # Convert the NumPy array to a PIL Image
    image = Image.fromarray(img_array)

    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
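    # These mean/std values are the standard ImageNet normalization statistics;
    # they are assumed to match the preprocessing used when the model was trained.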

    # Apply transformations
    image = test_transforms(image)

    # Rebuild the ResNet-18 architecture; the trained weights are loaded from
    # the checkpoint below, so no pretrained ImageNet weights are needed here.
    inf_model = models.resnet18(weights=None)

    # Replace the final fully connected layer so it outputs one score per class
    num_ftrs = inf_model.fc.in_features
    inf_model.fc = nn.Linear(num_ftrs, len(class_names))

    inf_model.to(torch.device('cpu'))
    inf_model.load_state_dict(torch.load('./resnet18_tinyfood_classifier.pth', map_location='cpu'))
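    # NOTE: the architecture is rebuilt and the checkpoint reloaded on every call;
    # this setup could be moved to module level so it only runs once per session.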

    # Perform inference
    with torch.no_grad():
        inf_model.eval()
        out = inf_model(image.unsqueeze(0))  # Add batch dimension
    
    # Get the predicted class index
    _, preds = torch.max(out, 1)
    idx = preds.cpu().numpy()[0]
    pred_class = class_names[idx]

    # The model outputs raw logits, so apply softmax to turn them into probabilities
    probabilities = torch.softmax(out, dim=1)
    confidence = probabilities[0, idx].item() * 100  # confidence (%) for the predicted class (not currently returned)

    # Look up nutrition facts for the predicted food
    nutrition_data_path = './food-data.csv'
    # Read the CSV file
    df = pd.read_csv(nutrition_data_path)

    # Match the prediction against the CSV data (food names in the file are capitalized)
    if pred_class.capitalize() in df["Makanan"].values:
        row = df.loc[df["Makanan"] == pred_class.capitalize()]

        # Extract the nutrition information
        calories = row["Kalori"].values[0]
        protein = row["Protein"].values[0]
        fat = row["Lemak"].values[0]
        carbs = row["Karbohidrat"].values[0]
        fiber = row["Serat"].values[0]
        sugar = row["Gula"].values[0]
        price = row["Harga (Rp)"].values[0]

        return pred_class, calories, protein, fat, carbs, fiber, sugar, price
    else:
        # Return one value per Gradio output component (label plus 7 numbers)
        return 'Food not found', 0, 0, 0, 0, 0, 0, 0


# Example local test: the function expects a NumPy image array, not a file path, e.g.
# print(predict(np.array(Image.open('./bibimbap.jpeg'))))

interface = gr.Interface(
    predict,
    inputs="image",
    title="Selera Cafe App",
    description="This App will provide the information of your food choice in Selera Cafe. The menu includes: Apple Pie, Bibimbap, Cannoli, Edamame, Falafel, French Toast, Ramen, Sushi, Tiramisu. Enjoy your food!",

    outputs=[
      gr.Text(label="Food Label"),
      gr.Number(label="Calories"),
      gr.Number(label="Protein"),
      gr.Number(label="Fat"),
      gr.Number(label="Carbs"),
      gr.Number(label="Fiber"),
      gr.Number(label="Sugar"),
      gr.Number(label="Price")
    ],
    examples=[
        './bibimbap.jpeg',
        './apple-pie.jpeg',
        './cannoli.jpeg'
    ])
interface.launch()
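
# Optional: interface.launch(share=True) creates a temporary public URL, which can
# help when running in a notebook or behind a firewall.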