|
import streamlit as st |
|
import pytesseract |
|
import torch |
|
from PIL import Image |
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
|
# Page header and tagline.
st.title(':blue[_SnapCode_]')

st.markdown("_Extract code blocks out of Screenshots and Images_")


# Load the code-vs-natural-language classifier once at script start.
# The spinner keeps the UI responsive while the weights download/deserialize.
# (Typo fixed in the user-facing message: "Natuaral" -> "Natural".)
with st.spinner('Code vs Natural language - Classification model is loading'):
    model_id = "vishnun/codenlbert-tiny"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSequenceClassification.from_pretrained(model_id)

st.success('Model loaded')
|
|
|
def classify_text(text):
    """Classify *text* with the preloaded sequence-classification model.

    Runs a single forward pass (no gradients) and returns the label string
    that ``model.config.id2label`` maps the highest-scoring logit to.
    """
    encoded = tokenizer(text, return_tensors="pt")

    # Inference only — disable autograd to avoid building a graph.
    with torch.no_grad():
        scores = model(**encoded).logits

    best_class = scores.argmax().item()
    return model.config.id2label[best_class]
|
|
|
uploaded_file = st.file_uploader(
    "Upload Image from which code needs to be extracted",
    type=['png', 'jpeg', 'jpg'],
)


if uploaded_file is not None:
    img = Image.open(uploaded_file)

    # OCR the image and drop empty lines from the raw text.
    ocr_list = [x for x in pytesseract.image_to_string(img).split("\n") if x != '']

    # Classify every OCR'd line as code vs natural language.
    ocr_class = [classify_text(x) for x in ocr_list]

    # Keep only the lines the classifier labelled as code.
    # (zip pairing replaces the original range(len(...)) index loop;
    # the list holds text lines, so it is named accordingly.)
    code_lines = [
        line for line, label in zip(ocr_list, ocr_class) if label.upper() == 'CODE'
    ]

    st.markdown('**Uploaded Image**')
    st.image(img, caption='Uploaded Image')

    st.markdown("**Retrieved Code Block**")
    st.code("\n".join(code_lines), language="python", line_numbers=False)