ChandraP12330 committed
Commit 1a549b1 (1 parent: bacdfba)
Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ import torch
 
 ##BLIP
 # Create the caption pipeline
-
+initial_caption_pipe = pipeline('image-to-text', model="Salesforce/blip-image-captioning-large")
 
 # Display the image using Streamlit
 uploaded_image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
@@ -14,7 +14,7 @@ if uploaded_image is not None:
     image= Image.open(uploaded_image)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    initial_caption =
+    initial_caption = initial_caption_pipe(uploaded_image)
     initial_caption = initial_caption[0]['generated_text']
 
     ##CLIP
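Taken together, the two hunks create the BLIP captioning pipeline once at module level and run it on each uploaded image to produce an initial caption. Below is a minimal sketch of how the changed lines plausibly fit into app.py; the imports, the final st.write display step, and passing the already-opened PIL image to the pipeline (rather than the raw uploaded file object, as the diff does) are assumptions, since the diff only shows lines 6-20.

import streamlit as st
from PIL import Image
from transformers import pipeline

##BLIP
# Create the caption pipeline (the line added by this commit)
initial_caption_pipe = pipeline('image-to-text', model="Salesforce/blip-image-captioning-large")

# Display the image using Streamlit
uploaded_image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if uploaded_image is not None:
    image = Image.open(uploaded_image)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Run BLIP; the image-to-text pipeline returns a list like
    # [{'generated_text': '...'}], so the caption string is pulled out below.
    initial_caption = initial_caption_pipe(image)
    initial_caption = initial_caption[0]['generated_text']
    st.write("Caption:", initial_caption)  # assumed display step, not part of the diff

Because Streamlit reruns the whole script on every interaction, the pipeline construction is often wrapped in st.cache_resource in practice, so the BLIP model is not reloaded on every rerun.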