Zaheer786124 committed
Commit 2709f08 · verified · 1 Parent(s): 2e66bae

Update app.py

Files changed (1):
  app.py +18 -15
app.py CHANGED
@@ -3,27 +3,30 @@ from PIL import Image
 from transformers import CLIPProcessor, CLIPModel
 
 # Title of the Streamlit app
-st.title("Unlimited Image Details Chatbot")
+st.title("Product Image Title and Description Generator")
 
 # Load the pre-trained Hugging Face CLIP model
 model_name = "openai/clip-vit-base-patch32"
 model = CLIPModel.from_pretrained(model_name)
 processor = CLIPProcessor.from_pretrained(model_name)
 
-# Multiple image upload
-uploaded_files = st.file_uploader("Upload images (You can upload multiple images):", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
+# Image upload
+uploaded_file = st.file_uploader("Upload a product image (JPG, JPEG, PNG):", type=["jpg", "jpeg", "png"])
 
-if uploaded_files:
-    # Loop through each uploaded image
-    for uploaded_file in uploaded_files:
-        # Open image and display
-        image = Image.open(uploaded_file)
-        st.image(image, caption=f"Uploaded Image: {uploaded_file.name}", use_column_width=True)
-
-        # Process the uploaded image using CLIP
-        st.write("Processing the image...")
-        inputs = processor(images=image, text=["What is in this image?"], return_tensors="pt")
-        outputs = model(**inputs)
-
-        # Display a simple output (you can modify this for more detailed results)
-        st.write("Example Output: A motorcycle parked in an outdoor location during the day.")
+if uploaded_file:
+    # Open and display the uploaded image
+    image = Image.open(uploaded_file)
+    st.image(image, caption="Uploaded Image", use_column_width=True)
+
+    # Process the uploaded image using CLIP model to generate a description
+    st.write("Processing the image...")
+
+    inputs = processor(images=image, text=["What is in this image?"], return_tensors="pt")
+    outputs = model(**inputs)
+
+    # Example output (you can refine the output generation further)
+    generated_description = "This is a stylish jacket suitable for cold weather."
+
+    # Display the generated title and description
+    st.write("Generated Product Title: Stylish Jacket")
+    st.write(f"Generated Product Description: {generated_description}")