mgbam committed on
Commit
efc6213
·
verified ·
1 Parent(s): 1df70ca

Update image_pipeline.py

Browse files
Files changed (1) hide show
  1. image_pipeline.py +23 -16
image_pipeline.py CHANGED
@@ -1,16 +1,23 @@
1
- from transformers import pipeline
2
-
3
def load_image_model(model_name="HuggingFaceTB/SmolVLM-500M-Instruct"):
    """Load an image-to-text (captioning) model via the HF pipeline.

    Generalized so callers can pick a different captioning model while
    keeping the original default for backward compatibility.

    Args:
        model_name: Hugging Face model identifier for an image-to-text
            model. Defaults to "HuggingFaceTB/SmolVLM-500M-Instruct".

    Returns:
        A transformers "image-to-text" pipeline ready for inference.
    """
    return pipeline("image-to-text", model=model_name)
8
-
9
def analyze_image(image_file, image_model):
    """Run the image-to-text pipeline on *image_file* and return a caption.

    Args:
        image_file: Image input accepted by the pipeline (e.g. a path or
            uploaded file object).
        image_model: Callable image-to-text pipeline, as returned by
            load_image_model().

    Returns:
        The generated caption string, or a fallback message when the
        pipeline produces no usable output.
    """
    output = image_model(image_file)
    # Pipelines normally yield a non-empty list of dicts; anything else
    # is treated as a failure to process the image.
    if not (isinstance(output, list) and output):
        return "Unable to process image."
    return output[0].get("generated_text", "No caption generated.")
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import pipeline
3
+ from config import IMAGE_MODEL_NAME
4
+
5
@st.cache_resource
def load_image_model():
    """Build and cache the image captioning pipeline.

    @st.cache_resource makes Streamlit construct the pipeline once and
    reuse it across reruns. The model identifier comes from
    config.IMAGE_MODEL_NAME — any HF-pipeline-compatible captioner, e.g.
    "nlpconnect/vit-gpt2-image-captioning" or
    "Salesforce/blip-image-captioning-base".

    Returns:
        A transformers "image-to-text" pipeline.
    """
    captioner = pipeline("image-to-text", model=IMAGE_MODEL_NAME)
    return captioner
12
+
13
def analyze_image(image_file, image_model):
    """Generate a caption for an uploaded image.

    Args:
        image_file: Image input accepted by the pipeline (e.g. an
            uploaded file object).
        image_model: Callable image-to-text pipeline (see load_image_model).

    Returns:
        The caption string on success; otherwise a human-readable
        fallback or error message. Never raises — all failures are
        reported as text for display in the UI.
    """
    try:
        predictions = image_model(image_file)
        # Expected shape: non-empty list of dicts with "generated_text".
        if isinstance(predictions, list) and predictions:
            return predictions[0].get("generated_text", "No caption.")
        return "No output from the model."
    except Exception as err:
        return f"Error analyzing image: {str(err)}"