update
- app.py +20 -13
- requirements.txt +1 -0
app.py
CHANGED
@@ -1,8 +1,9 @@
 import streamlit as st
 from PIL import Image
 from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
+from tqdm import tqdm
 
-st.title("
+st.title("Coffe machine captioning app")
 
 
 uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
@@ -12,6 +13,18 @@ model_id = "Fer14/paligemma_coffe_machine_caption"
 model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
 processor = PaliGemmaProcessor.from_pretrained(model_id)
 
+
+# Instructions for Tesseract OCR
+st.sidebar.title("Instructions")
+st.sidebar.write(
+    """
+    1. Upload an image using the file uploader.
+    2. Wait for the app to process and extract text from the image.
+    3. The extracted text will be displayed in the text area.
+    """
+)
+
+
 prompt = (
     f"Generate a caption for the following coffee maker image. The caption has to be of the following structure:\n"
     "\"A <color> <type>, <accessories>, <shape> shaped, with <screen> and <number> <b_color> butons\"\n\n"
@@ -37,22 +50,16 @@ if uploaded_image is not None:
         padding="longest",
     )
 
-
+
+    st.write("Generating caption for the image...")
+    with tqdm(total=100) as pbar:
+        output = model.generate(**inputs, max_length=1000)
+        pbar.update(100)
+
     out = processor.decode(output[0], skip_special_tokens=True)[len(prompt) :]
 
-    # Extract text from the image
-    st.write("Extracting text from the image...")
     # Display the extracted text
     st.text_area("Coffe machine description", out, height=300)
 
 
 
-# Instructions for Tesseract OCR
-st.sidebar.title("Instructions")
-st.sidebar.write(
-    """
-    1. Upload an image using the file uploader.
-    2. Wait for the app to process and extract text from the image.
-    3. The extracted text will be displayed in the text area.
-    """
-)
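For reference, a minimal standalone sketch of the caption flow app.py ends up with after this change. The model id, generate() arguments, decode() call, and the prompt-slicing step are taken from the diff above; opening the upload with PIL and the exact processor(...) call are assumptions, since those lines fall outside the displayed hunks.

# Minimal sketch of the post-change caption flow (not the app's exact code).
# Assumptions: the image is opened with PIL and passed to the processor as
# shown below; the model id, generate(), decode(), and prompt slicing come
# straight from the diff.
from PIL import Image
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor

model_id = "Fer14/paligemma_coffe_machine_caption"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
processor = PaliGemmaProcessor.from_pretrained(model_id)

prompt = "Generate a caption for the following coffee maker image."  # shortened stand-in for the app's full template
image = Image.open("coffee_machine.jpg").convert("RGB")  # hypothetical local file in place of the Streamlit upload

inputs = processor(text=prompt, images=image, return_tensors="pt", padding="longest")
output = model.generate(**inputs, max_length=1000)
caption = processor.decode(output[0], skip_special_tokens=True)[len(prompt):]
print(caption)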
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
 transformers @ git+https://github.com/huggingface/transformers.git
 datasets
 accelerate
+tqdm