zamalali committed
Commit 97b2775 · 1 Parent(s): 553b5fc

Refactor conversation function to remove commented code and improve placeholder image handling

Files changed (1):
  1. app.py (+5 -10)
app.py CHANGED
@@ -204,7 +204,6 @@ def conversation(
     hf_token,
     model_path,
 ):
-    # Initialize the LLM based on inputs
     if hf_token.strip() != "" and model_path.strip() != "":
         llm = HuggingFaceEndpoint(
             repo_id=model_path,
@@ -220,7 +219,6 @@ def conversation(
         huggingfacehub_api_token=os.getenv("P_HF_TOKEN", "None"),
     )
 
-    # Retrieve collections from vector database
     text_collection = vectordb_client.get_collection(
         "text_db", embedding_function=sentence_transformer_ef
     )
@@ -228,19 +226,16 @@
         "image_db", embedding_function=sentence_transformer_ef
     )
 
-    # Query text context
     results = text_collection.query(
         query_texts=[msg], include=["documents"], n_results=num_context
     )["documents"][0]
 
-    # Query image context
     similar_images = image_collection.query(
         query_texts=[msg],
         include=["metadatas", "distances", "documents"],
         n_results=img_context,
     )
 
-    # Initialize image links and descriptions
     img_links = similar_images["metadatas"][0] if similar_images["metadatas"] else []
     images_and_locs = []
 
@@ -252,11 +247,12 @@
         except Exception as e:
             print(f"Error decoding image: {e}")
 
-    # Handle case where no images are found
     if not images_and_locs:
-        images_and_locs = [("path/to/placeholder/image.jpg", "No images found")]
+        placeholder_path = "assets/placeholder.jpg"
+        if not os.path.exists(placeholder_path):
+            raise FileNotFoundError(f"Placeholder image not found at {placeholder_path}")
+        images_and_locs = [(placeholder_path, "No images found")]
 
-    # Prepare prompt for the LLM
     img_desc = "\n".join(similar_images["documents"][0]) if images_and_locs else "No Images Are Provided"
     template = """
         Context:
@@ -274,14 +270,13 @@ def conversation(
     prompt = PromptTemplate(template=template, input_variables=["context", "question"])
     context = "\n\n".join(results)
 
-    # Generate response
     response = llm(prompt.format(context=context, question=msg, images=img_desc))
 
-    # Return updated history, text results, and image locations
     return history + [(msg, response)], results, images_and_locs
 
 
 
+
 def check_validity_and_llm(session_states):
     if session_states.get("processed", False) == True:
         return gr.Tabs(selected=2)
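
Read on its own, the fallback path this commit introduces amounts to the sketch below; the helper name and the keyword default are illustrative, while the placeholder path, the existence check, and the error message come straight from the diff:

import os

def with_placeholder(images_and_locs, placeholder_path="assets/placeholder.jpg"):
    # Mirrors the committed behaviour: when no images could be decoded from
    # the vector-store hits, show a single bundled placeholder instead of the
    # old hard-coded fake path, and fail loudly if that asset is missing.
    if not images_and_locs:
        if not os.path.exists(placeholder_path):
            raise FileNotFoundError(f"Placeholder image not found at {placeholder_path}")
        images_and_locs = [(placeholder_path, "No images found")]
    return images_and_locs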
 
204
  hf_token,
205
  model_path,
206
  ):
 
207
  if hf_token.strip() != "" and model_path.strip() != "":
208
  llm = HuggingFaceEndpoint(
209
  repo_id=model_path,
 
219
  huggingfacehub_api_token=os.getenv("P_HF_TOKEN", "None"),
220
  )
221
 
 
222
  text_collection = vectordb_client.get_collection(
223
  "text_db", embedding_function=sentence_transformer_ef
224
  )
 
226
  "image_db", embedding_function=sentence_transformer_ef
227
  )
228
 
 
229
  results = text_collection.query(
230
  query_texts=[msg], include=["documents"], n_results=num_context
231
  )["documents"][0]
232
 
 
233
  similar_images = image_collection.query(
234
  query_texts=[msg],
235
  include=["metadatas", "distances", "documents"],
236
  n_results=img_context,
237
  )
238
 
 
239
  img_links = similar_images["metadatas"][0] if similar_images["metadatas"] else []
240
  images_and_locs = []
241
 
 
247
  except Exception as e:
248
  print(f"Error decoding image: {e}")
249
 
 
250
  if not images_and_locs:
251
+ placeholder_path = "assets/placeholder.jpg"
252
+ if not os.path.exists(placeholder_path):
253
+ raise FileNotFoundError(f"Placeholder image not found at {placeholder_path}")
254
+ images_and_locs = [(placeholder_path, "No images found")]
255
 
 
256
  img_desc = "\n".join(similar_images["documents"][0]) if images_and_locs else "No Images Are Provided"
257
  template = """
258
  Context:
 
270
  prompt = PromptTemplate(template=template, input_variables=["context", "question"])
271
  context = "\n\n".join(results)
272
 
 
273
  response = llm(prompt.format(context=context, question=msg, images=img_desc))
274
 
 
275
  return history + [(msg, response)], results, images_and_locs
276
 
277
 
278
 
279
+
280
  def check_validity_and_llm(session_states):
281
  if session_states.get("processed", False) == True:
282
  return gr.Tabs(selected=2)
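
For context, conversation() relies on a vectordb_client and a sentence_transformer_ef defined elsewhere in app.py. A minimal sketch of that setup, assuming chromadb with a sentence-transformers embedding function (the collection names match the diff; the client type and model name are assumptions, not taken from this commit):

import chromadb
from chromadb.utils import embedding_functions

# In-memory client for illustration; app.py may use a persistent client instead.
vectordb_client = chromadb.Client()
sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(
    model_name="all-MiniLM-L6-v2"  # assumed model, not specified in this diff
)

# The two collections queried by conversation(): document text and image descriptions.
text_db = vectordb_client.get_or_create_collection(
    "text_db", embedding_function=sentence_transformer_ef
)
image_db = vectordb_client.get_or_create_collection(
    "image_db", embedding_function=sentence_transformer_ef
)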