LuckyHappyFish committed on
Commit
3189d1f
·
verified ·
1 Parent(s): d0c0124

made change

Browse files
Files changed (1) hide show
  1. app.py +176 -50
app.py CHANGED
@@ -3,30 +3,114 @@ from transformers import pipeline
3
  from PIL import Image
4
  from huggingface_hub import InferenceClient
5
  import os
 
 
6
  from gradio_client import Client
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
# Hugging Face API key (read from Streamlit secrets; never hard-code tokens)
API_KEY = st.secrets["HF_API_KEY"]

# Initialize the Hugging Face Inference Client used for chat-completion calls
client = InferenceClient(api_key=API_KEY)
13
 
14
- # Load the image classification pipeline
15
# Cached loader: Streamlit keeps a single pipeline instance across reruns.
@st.cache_resource
def load_image_classification_pipeline():
    """Load the pretrained food image-classification pipeline."""
    return pipeline(
        "image-classification",
        model="Shresthadev403/food-image-classification",
    )

pipe_classification = load_image_classification_pipeline()
23
 
24
- # Function to generate ingredients using Hugging Face Inference Client
25
  def get_ingredients_qwen(food_name):
26
- """
27
- Generate a list of ingredients for the given food item using Qwen NLP model.
28
- Returns a clean, comma-separated list of ingredients.
29
- """
30
  messages = [
31
  {
32
  "role": "user",
@@ -36,57 +120,99 @@ def get_ingredients_qwen(food_name):
36
  ]
37
  try:
38
  completion = client.chat.completions.create(
39
- model="Qwen/Qwen2.5-Coder-32B-Instruct",
40
- messages=messages,
41
- max_tokens=50
42
  )
43
- generated_text = completion.choices[0].message["content"].strip()
44
  return generated_text
45
  except Exception as e:
46
  return f"Error generating ingredients: {e}"
47
 
48
# Streamlit app setup
st.title("Food Image Recognition with Ingredients")

# Add banner image
st.image("IR_IMAGE.png", caption="Food Recognition Model", use_container_width=True)

# Sidebar for model information
st.sidebar.title("Model Information")
st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
st.sidebar.write("**LLM for Ingredients**: Qwen/Qwen2.5-Coder-32B-Instruct")

# Upload image
uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    # Display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)
    st.write("Classifying...")

    # Make predictions with the cached classification pipeline
    predictions = pipe_classification(image)

    # Display only the top prediction
    top_food = predictions[0]['label']
    st.header(f"Food: {top_food}")

    # Generate and display ingredients for the top prediction
    st.subheader("Ingredients")
    try:
        ingredients = get_ingredients_qwen(top_food)
        st.write(ingredients)
    except Exception as e:
        st.error(f"Error generating ingredients: {e}")

    st.subheader("Healthier alternatives:")
    try:
        # BUGFIX: use a dedicated name instead of rebinding the module-level
        # `client` (the HF InferenceClient that get_ingredients_qwen relies on).
        rag_client = Client("https://66cd04274e7fd11327.gradio.live/")
        result = rag_client.predict(query=f"What's a healthy {top_food} recipe, and why is it healthy?", api_name="/get_response")
        st.write(result)
    except Exception as e:
        st.error(f"Unable to contact RAG: {e}")

# Footer
st.sidebar.markdown("Developed by Muhammad Hassan Butt.")
 
3
  from PIL import Image
4
  from huggingface_hub import InferenceClient
5
  import os
6
+ import openai
7
+ from openai.error import OpenAIError
8
  from gradio_client import Client
9
 
10
# Set page configuration (must be the first Streamlit call in the script)
st.set_page_config(
    page_title="Plate Mate - Your Culinary Assistant",
    page_icon="🍽️",
    layout="centered",  # center content for better mobile experience
    initial_sidebar_state="collapsed",
)
17
+
18
def local_css():
    """Inject custom CSS for layout, typography, and mobile responsiveness.

    Rendered once via st.markdown with unsafe_allow_html so the <style>
    block applies to the whole page.
    """
    st.markdown(
        """
        <style>
        /* General resets */
        body, html {
            margin: 0;
            padding: 0;
            font-family: "Helvetica Neue", Arial, sans-serif;
            background-color: #f9f9f9;
        }

        /* Container and spacing */
        .css-1aumxhk, .css-keje6w, .css-18e3th9, .css-12oz5g7 {
            padding-left: 0 !important;
            padding-right: 0 !important;
        }

        /* Title styling */
        .title h1 {
            text-align: center;
            font-size: 2.5em;
            margin-bottom: 0.5em;
            color: #333;
        }

        /* Subheader styling */
        h2, h3, h4, h5, h6 {
            color: #555;
            margin-bottom: 0.5em;
        }

        /* Adjust image styling */
        img {
            max-width: 100%;
            height: auto;
            border-radius: 8px;
        }

        /* On mobile, reduce font sizes and margins */
        @media (max-width: 600px) {
            .title h1 {
                font-size: 1.8em;
            }

            h2, h3, h4 {
                font-size: 1em;
            }

            .stButton button {
                width: 100%;
            }
        }

        /* Sidebar adjustments */
        [data-testid="stSidebar"] {
            width: 250px;
            background: #fff;
        }

        /* Preset images container */
        .preset-container {
            display: flex;
            flex-wrap: wrap;
            gap: 10px;
            justify-content: center;
            margin: 1em 0;
        }
        .preset-container img {
            width: 80px;
            height: 80px;
            object-fit: cover;
            cursor: pointer;
            border: 2px solid transparent;
        }
        .preset-container img:hover {
            border: 2px solid #007BFF;
        }

        </style>
        """, unsafe_allow_html=True
    )

local_css()  # Apply the CSS
102
+
103
# Hugging Face API key (read from Streamlit secrets; never hard-code tokens)
API_KEY = st.secrets["HF_API_KEY"]

# Shared Hugging Face Inference client used by get_ingredients_qwen below
client = InferenceClient(api_key=API_KEY)
106
 
 
107
@st.cache_resource
def load_image_classification_pipeline():
    """Return the food image-classification pipeline, cached across reruns."""
    model_name = "Shresthadev403/food-image-classification"
    return pipeline("image-classification", model=model_name)


pipe_classification = load_image_classification_pipeline()
112
 
 
113
  def get_ingredients_qwen(food_name):
 
 
 
 
114
  messages = [
115
  {
116
  "role": "user",
 
120
  ]
121
  try:
122
  completion = client.chat.completions.create(
123
+ model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, max_tokens=50
 
 
124
  )
125
+ generated_text = completion.choices[0]['message']['content'].strip()
126
  return generated_text
127
  except Exception as e:
128
  return f"Error generating ingredients: {e}"
129
 
130
# NOTE(review): this key is configured but no OpenAI API call is made in this
# file — confirm it is still needed before removing.
openai.api_key = st.secrets["openai"]

# Page title (styled via the .title CSS class)
st.markdown('<div class="title"><h1>PlateMate - Your Culinary Assistant</h1></div>', unsafe_allow_html=True)

# Banner Image (smaller; skipped gracefully when the asset is missing)
banner_image_path = "IR_IMAGE.png"
if os.path.exists(banner_image_path):
    # Display a smaller version of the banner by centering it in the middle column
    _, banner_col, _ = st.columns([1, 3, 1])
    with banner_col:
        st.image(banner_image_path, use_container_width=True)
else:
    st.warning(f"Banner image '{banner_image_path}' not found.")

# Sidebar Info
with st.sidebar:
    st.title("Model Information")
    st.write("**Image Classification Model:**")
    st.write("Shresthadev403/food-image-classification")
    st.write("**LLM for Ingredients:**")
    st.write("Qwen/Qwen2.5-Coder-32B-Instruct")
    st.markdown("---")
    st.markdown("<p style='text-align: center;'>Developed by Muhammad Hassan Butt.</p>", unsafe_allow_html=True)

st.subheader("Upload a food image:")

# Preset sample images: a preset is represented by its file path (str),
# while a user upload is a file-like object — distinguished below.
preset_images = {
    "Pizza": "sample_pizza.png",
    "Salad": "sample_salad.png",
    "Sushi": "sample_sushi.png"
}

selected_preset = st.selectbox("Or choose a preset sample image:", ["None"] + list(preset_images.keys()))
if selected_preset != "None":
    uploaded_file = preset_images[selected_preset]
else:
    uploaded_file = st.file_uploader("", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    if isinstance(uploaded_file, str):
        # Use the preset image (path on disk)
        if os.path.exists(uploaded_file):
            image = Image.open(uploaded_file)
        else:
            st.error(f"Sample image '{uploaded_file}' not found.")
            image = None
    else:
        image = Image.open(uploaded_file)

    if image:
        st.image(image, caption="Selected Image", use_container_width=True)

        if st.button("Classify"):
            with st.spinner("Classifying..."):
                try:
                    predictions = pipe_classification(image)
                    if predictions:
                        top_food = predictions[0]['label']
                        confidence = predictions[0]['score']
                        st.header(f"🍽️ Food: {top_food} ({confidence*100:.2f}% confidence)")

                        # Generate ingredients via the HF chat-completion helper
                        st.subheader("📝 Ingredients")
                        try:
                            ingredients = get_ingredients_qwen(top_food)
                            st.write(ingredients)
                        except Exception as e:
                            st.error(f"Error generating ingredients: {e}")

                        # Healthier alternatives via the external RAG service
                        st.subheader("💡 Healthier Alternatives")
                        try:
                            client_rag = Client("https://66cd04274e7fd11327.gradio.live/")
                            result = client_rag.predict(query=f"What's a healthy {top_food} recipe, and why is it healthy?", api_name="/get_response")
                            st.write(result)
                        except Exception as e:
                            # BUGFIX: the former `except OpenAIError` branch was dead code —
                            # gradio_client never raises OpenAIError (and `openai.error`
                            # was removed in openai>=1.0). The generic handler covers it.
                            st.error(f"Unable to generate healthier alternatives: {e}")
                    else:
                        st.error("No predictions returned from the classification model.")
                except Exception as e:
                    st.error(f"Error during classification: {e}")

else:
    st.info("Please select or upload an image to get started.")