import os

import numpy as np
import streamlit as st
import tensorflow as tf
from PIL import Image
from tensorflow.keras.preprocessing.image import img_to_array
# Custom CTC layer, required to deserialize the trained model
class CTCLayer(tf.keras.layers.Layer):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.loss_fn = tf.keras.backend.ctc_batch_cost

    def call(self, y_true, y_pred, input_length, label_length):
        # Compute the training-time CTC loss and register it
        # on the layer using `self.add_loss()`.
        loss = self.loss_fn(y_true, y_pred, input_length, label_length)
        self.add_loss(loss)
        # At test time, just return the computed loss
        return loss
# Load the trained model with the custom CTC layer registered
@st.cache_resource
def load_model():
    model_path = "model_ocr.h5"  # Update with the correct model file path
    model = tf.keras.models.load_model(model_path, custom_objects={"CTCLayer": CTCLayer})
    return model

model = load_model()
# Input image dimensions expected by the model
img_width, img_height = 200, 50  # Adjust to the input size your model was trained on

# Maximum text-label length used during training
max_length = 50  # Adjust to the maximum label length of your dataset
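# NOTE: decode_batch_predictions() below needs the output vocabulary of the model.
# The list here is only an assumed placeholder; replace it with the exact character
# set, in the same order, that was used when the model was trained.
characters = list("abcdefghijklmnopqrstuvwxyz0123456789")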
# Preprocess an uploaded image and run the OCR model on it
def prepare_image(img):
    img = img.resize((img_width, img_height))  # Resize to the model's expected input size
    img_array = img_to_array(img)  # NOTE: add scaling (e.g. / 255.0) if the model was trained on normalized inputs
    img_array = np.expand_dims(img_array, axis=0)  # Add batch dimension

    # The input_length and label_length need to be set according to your data
    input_length = np.ones((img_array.shape[0], 1)) * (img_width // 4)  # Example input length
    label_length = np.ones((img_array.shape[0], 1)) * max_length        # Example label length

    # Make prediction
    preds = model.predict([img_array, input_length, label_length])

    # Decode predictions into text
    pred_texts = decode_batch_predictions(preds)
    return pred_texts
# Greedy CTC decoder: collapse repeats, drop blanks, then map indices to characters
def decode_batch_predictions(pred):
    # `pred` has shape (batch, timesteps, vocab_size); ctc_decode returns
    # index sequences padded with -1
    input_len = np.ones(pred.shape[0]) * pred.shape[1]
    results = tf.keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][:, :max_length]

    pred_texts = []
    for res in results.numpy():
        pred_text = "".join(characters[int(c)] for c in res if c != -1)  # Map indices to characters
        pred_texts.append(pred_text)
    return pred_texts
def run():
    st.title("OCR Model Deployment")

    # Upload image
    img_file = st.file_uploader("Choose an Image", type=["jpg", "png"])
    if img_file is not None:
        img = Image.open(img_file).convert("L")  # Convert to grayscale if needed
        st.image(img, use_column_width=True)

        # Save the uploaded image
        upload_dir = "./upload_images/"
        os.makedirs(upload_dir, exist_ok=True)
        save_image_path = os.path.join(upload_dir, img_file.name)
        with open(save_image_path, "wb") as f:
            f.write(img_file.getbuffer())

        # Process the image and make a prediction
        pred_texts = prepare_image(img)

        # Show the predicted text
        st.success(f"**Predicted Text: {pred_texts[0]}**")

if __name__ == "__main__":
    run()
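# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py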