import streamlit as st
import time
import easyocr
import math
from pathlib import Path
from PIL import Image, ImageDraw
import PIL
import io
import os
import cv2
import numpy as np
import shutil
import base64
import logging
st.set_page_config(
    page_title="Inpaint Me",
    page_icon=":art:",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://www.extremelycoolapp.com/help',
        'Report a bug': "https://www.extremelycoolapp.com/bug",
        'About': "# This is a header. This is an *extremely* cool app!"
    }
)
# @st.cache_data(show_spinner=False, suppress_st_warning=True)
@st.cache_resource(show_spinner=False)
def load_models():
    # Pass the short codes of the languages to detect (e.g., ['en', 'es'] for
    # English and Spanish); only English is used here.
    reader = easyocr.Reader(['en'])
    return reader

reader = load_models()
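# reader.readtext(...) with detail=1 returns a list of
# (bounding_box, detected_text, confidence) tuples, where bounding_box is the
# list of four corner points of a detected word; only the boxes are used below.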
def midpoint(x1, y1, x2, y2):
    x_mid = int((x1 + x2) / 2)
    y_mid = int((y1 + y2) / 2)
    return (x_mid, y_mid)
def inpaint_text(img, text_coordinates):
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Single-channel mask marking the pixels to be inpainted.
    mask = np.zeros(img.shape[:2], dtype="uint8")
    for box in text_coordinates:
        # Each box holds the four corners of a detected word:
        # top-left, top-right, bottom-right, bottom-left.
        x0, y0 = box[0]
        x1, y1 = box[1]
        x2, y2 = box[2]
        x3, y3 = box[3]
        # Draw a line through the centre of the box, from the midpoint of the
        # right edge to the midpoint of the left edge, as thick as the box
        # height, so the whole word is covered in the mask.
        x_mid0, y_mid0 = midpoint(x1, y1, x2, y2)
        x_mid1, y_mid1 = midpoint(x0, y0, x3, y3)
        thickness = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
        cv2.line(mask, (x_mid0, y_mid0), (x_mid1, y_mid1), 255, thickness)
    # Inpaint the masked regions using the Navier-Stokes based algorithm.
    img = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)
    return img
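# Minimal sketch of using the reader and inpaint_text together outside
# Streamlit, assuming a local file named "example.jpg" exists (the filename is
# illustrative):
#     img = np.array(Image.open("example.jpg"))
#     bounds = reader.readtext(img, detail=1)
#     cleaned = inpaint_text(img, [bound[0] for bound in bounds])
#     Image.fromarray(cleaned).save("example_inpainted.jpg")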
def file_selector(folder_path='.'):
    filenames = os.listdir(folder_path)
    selected_filename = st.selectbox('Select a file', filenames)
    return os.path.join(folder_path, selected_filename), selected_filename
st.markdown(
    """
    """,
    unsafe_allow_html=True
)
LOGO_IMAGE = "inpaint_me_logo.png"

col1, col2 = st.columns([2, 2])
with col1:
    # st.image('./aida_logo.png')
    st.markdown(
        f"""
        """,
        unsafe_allow_html=True
    )
with col2:
    # st.image('./aida_logo.png')
    st.markdown(
        f"""
        """,
        unsafe_allow_html=True
    )
st.header("")
with st.expander("Project Description", expanded=False):
st.write("""
Developed in Applied Intelligence and Data Analysis ([AI+DA](http://aida.etsisi.upm.es/)) group at Polytech University of Madrid (UPM).
To rule out the possibility of text misleading image Deep Learning models (e.g., CNNs) it is useful to remove text from images. Hence,
this tool uses [EasyOCR](https://github.com/JaidedAI/EasyOCR) and [OpenCV](https://pypi.org/project/opencv-python/) for detecting texts and inpainting them. Currently, only `JPG` files are supported. This tools has been tested on memes, feel free to try some examples or upload your own images.
""")
file_example_path = None
if st.checkbox('Select an example'):
    folder_path = './Examples/'
    # if st.checkbox('Change directory'):
    #     folder_path = st.text_input('Enter folder path', '.')
    file_example_path, example_file_name = file_selector(folder_path=folder_path)
    st.write('You selected `%s`' % file_example_path)
uploaded_file = st.file_uploader(label="Upload image",
                                 type=["jpg", "jpeg"],
                                 accept_multiple_files=False,
                                 key=None,
                                 help=None,
                                 on_change=None,
                                 args=None,
                                 kwargs=None,
                                 )
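# uploaded_file is a Streamlit UploadedFile (a file-like buffer) when the user
# uploads an image, or None otherwise.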
col1, col2, col3 = st.columns([2, 0.5, 2])
if file_example_path and not uploaded_file:
    with col1:
        st.subheader("Original")
        img = Image.open(file_example_path)
        st.image(img, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
    with col3:
        st.subheader("Inpainted")
        with st.spinner('Wait for it...'):
            time.sleep(1)
            # Show the pre-computed inpainted version of the example instead
            # of running detection and inpainting on every rerun.
            example_file_name = Path(example_file_name).stem
            inpaint_image = f"./Examples_inpainted/{example_file_name}_inpaint.jpeg"
            # img_array = np.array(Image.open(file_example_path))
            # # detect text
            # bounds = reader.readtext(img_array, detail=1)  # [(coordinates, detected text, confidence)]
            # text_coordinates = [bound[0] for bound in bounds]
            # # inpaint text coordinates
            # inpaint_image = inpaint_text(img_array, text_coordinates)
            st.image(inpaint_image, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
if uploaded_file:
    with col1:
        st.subheader("Original")
        st.image(uploaded_file, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
    with col3:
        st.subheader("Inpainted")
        with st.spinner('Wait for it...'):
            # Read the uploaded file as raw bytes
            bytes_data = uploaded_file.getvalue()
            # Decode the bytes into a numpy array
            img_array = np.array(Image.open(io.BytesIO(bytes_data)))
            # Detect text; detail=1 returns [(coordinates, detected text, confidence)]
            bounds = reader.readtext(img_array, detail=1)
            text_coordinates = [bound[0] for bound in bounds]
            # Inpaint the detected text regions
            inpaint_image = inpaint_text(img_array, text_coordinates)
            st.image(inpaint_image, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")