Zaheer786124 committed on
Commit
3bf1c23
·
verified ·
1 Parent(s): 2709f08

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -14
app.py CHANGED
@@ -1,14 +1,13 @@
1
  import streamlit as st
2
  from PIL import Image
3
- from transformers import CLIPProcessor, CLIPModel
4
 
5
  # Title of the Streamlit app
6
- st.title("Product Image Title and Description Generator")
7
 
8
- # Load the pre-trained Hugging Face CLIP model
9
- model_name = "openai/clip-vit-base-patch32"
10
- model = CLIPModel.from_pretrained(model_name)
11
- processor = CLIPProcessor.from_pretrained(model_name)
12
 
13
  # Image upload
14
  uploaded_file = st.file_uploader("Upload a product image (JPG, JPEG, PNG):", type=["jpg", "jpeg", "png"])
@@ -18,15 +17,18 @@ if uploaded_file:
18
  image = Image.open(uploaded_file)
19
  st.image(image, caption="Uploaded Image", use_column_width=True)
20
 
21
- # Process the uploaded image using CLIP model to generate a description
22
  st.write("Processing the image...")
23
 
24
- inputs = processor(images=image, text=["What is in this image?"], return_tensors="pt")
25
- outputs = model(**inputs)
 
26
 
27
- # Example output (you can refine the output generation further)
28
- generated_description = "This is a stylish jacket suitable for cold weather."
 
 
 
 
 
29
 
30
- # Display the generated title and description
31
- st.write("Generated Product Title: Stylish Jacket")
32
- st.write(f"Generated Product Description: {generated_description}")
 
import streamlit as st
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Title of the Streamlit app
st.title("Image and Text Combined in One Message")


@st.cache_resource
def load_blip():
    """Load the BLIP image-captioning processor and model once.

    Streamlit re-executes this entire script on every user interaction;
    without caching, both objects would be re-instantiated (and possibly
    re-downloaded) on each rerun. ``st.cache_resource`` keeps a single
    shared copy across reruns and sessions.
    """
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    return processor, model


# Load the pre-trained BLIP model (cached across reruns)
processor, model = load_blip()

# Image upload
uploaded_file = st.file_uploader("Upload a product image (JPG, JPEG, PNG):", type=["jpg", "jpeg", "png"])

if uploaded_file:
    image = Image.open(uploaded_file)
    # NOTE(review): use_column_width is deprecated in newer Streamlit in
    # favor of use_container_width — kept as-is to preserve behavior on
    # the Streamlit version this app was written against; verify before upgrading.
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Generate the description using the BLIP model
    st.write("Processing the image...")

    # Process the image and generate caption token ids
    inputs = processor(images=image, return_tensors="pt")
    out = model.generate(**inputs)

    # Decode the generated token ids into a human-readable description
    generated_description = processor.decode(out[0], skip_special_tokens=True)

    # Combine image and text in one message
    st.markdown(f"**Generated Product Description:** {generated_description}")
    # Plain string literal — the original used an f-string with no placeholders (F541)
    st.markdown("**Here is your product image:**")
    st.image(image, caption="Generated Product Image", use_column_width=True)