import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from PIL import Image

# Load the image classification pipeline
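# st.cache_resource keeps the loaded pipeline in memory so the model is
# initialised only once per app process rather than on every rerun.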
@st.cache_resource
def load_image_classification_pipeline():
    """
    Load the image classification pipeline using a pretrained model.
    """
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")

pipe_classification = load_image_classification_pipeline()

# Load Qwen tokenizer and model
@st.cache_resource
def load_qwen_model():
    """
    Load the Qwen/Qwen2.5-Coder-32B-Instruct model and tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct")
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct", device_map="auto")
    return tokenizer, model

# Function to generate ingredients using Qwen
def get_ingredients_qwen(food_name, tokenizer, model):
    """
    Generate a list of ingredients for the given food item using the Qwen model.
    """
    prompt = f"List the main ingredients typically used to prepare {food_name}:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=50)
    # Decode only the newly generated tokens so the prompt is not echoed back
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

# Streamlit app
st.title("Food Image Recognition with Ingredients")

# # Add the provided image as a banner
# st.image("CTP_Project/IR_IMAGE", caption="Food Recognition Model", use_column_width=True)

# Sidebar for model information
st.sidebar.title("Model Information")
st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
st.sidebar.write("**LLM for Ingredients**: Qwen2.5-Coder-32B-Instruct")

# Upload image
uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

# Load the Qwen model and tokenizer
tokenizer, model = load_qwen_model()
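# Note: this triggers the (cached) Qwen model load at app startup,
# before any image has been uploaded.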

if uploaded_file is not None:
    # Display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("Classifying...")

    # Make predictions
    predictions = pipe_classification(image)
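    # `predictions` is a list of {"label": ..., "score": ...} dicts sorted by score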

    # Display only the top prediction
    top_food = predictions[0]['label']
    st.header(f"Food: {top_food}")

    # Generate and display ingredients for the top prediction
    st.subheader("Ingredients")
    try:
        ingredients = get_ingredients_qwen(top_food, tokenizer, model)
        st.write(ingredients)
    except Exception as e:
        st.error(f"Error generating ingredients: {e}")