import streamlit as st
import sparknlp
import os

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

# Page configuration
st.set_page_config(
    layout="wide", 
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""

    <style>

        .main-title {

            font-size: 36px;

            color: #4A90E2;

            font-weight: bold;

            text-align: center;

        }

        .section {

            background-color: #f9f9f9;

            padding: 10px;

            border-radius: 10px;

            margin-top: 10px;

        }

        .section p, .section ul {

            color: #666666;

        }

    </style>

""", unsafe_allow_html=True)

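# Start the Spark NLP session once; st.cache_resource reuses it across reruns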
@st.cache_resource
def init_spark():
    return sparknlp.start()

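# Build the two-stage pipeline: ImageAssembler -> pretrained Swin classifier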
@st.cache_resource
def create_pipeline(model):
    image_assembler = ImageAssembler() \
        .setInputCol("image") \
        .setOutputCol("image_assembler")

    # Load the model chosen in the sidebar rather than the default checkpoint
    image_classifier = SwinForImageClassification.pretrained(model) \
        .setInputCols(["image_assembler"]) \
        .setOutputCol("class")

    pipeline = Pipeline(stages=[image_assembler, image_classifier])
    return pipeline

def fit_data(pipeline, data):
    # Pretrained stages require no training, so fit on an empty DataFrame
    empty_df = spark.createDataFrame([['']]).toDF('text')
    model = pipeline.fit(empty_df)
    # LightPipeline annotates a single image locally, avoiding a Spark job
    light_pipeline = LightPipeline(model)
    annotations_result = light_pipeline.fullAnnotateImage(data)
    return annotations_result[0]['class'][0].result

def save_uploadedfile(uploadedfile):
    # Persist the uploaded image next to the bundled example images
    filepath = os.path.join(IMAGE_FILE_PATH, uploadedfile.name)
    with open(filepath, "wb") as f:
        if hasattr(uploadedfile, 'getbuffer'):
            f.write(uploadedfile.getbuffer())
        else:
            f.write(uploadedfile.read())
        
# Sidebar content
model_list = [
    'image_classifier_swin_base_patch4_window7_224',
    'image_classifier_swin_base_patch4_window12_384_in22k'
]
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    model_list,
    help="For more info about the models visit: https://sparknlp.org/models"
)

# Set up the page layout
st.markdown(f'<div class="main-title">Swin For Image Classification</div>', unsafe_allow_html=True)
# st.markdown(f'<div class="section"><p>{sub_title}</p></div>', unsafe_allow_html=True)

# Reference notebook link in sidebar
link = """

<a href="https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/image/SwinForImageClassification.ipynb">

    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>

</a>

"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Load examples
IMAGE_FILE_PATH = f"inputs"
image_files = sorted([file for file in os.listdir(IMAGE_FILE_PATH) if file.split('.')[-1]=='png' or file.split('.')[-1]=='jpg' or file.split('.')[-1]=='JPEG' or file.split('.')[-1]=='jpeg'])

img_options = st.selectbox("Select an image", image_files)
uploadedfile = st.file_uploader("Try it for yourself!")

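# An uploaded image takes precedence over the example selection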
if uploadedfile:
    save_uploadedfile(uploadedfile)
    selected_image = os.path.join(IMAGE_FILE_PATH, uploadedfile.name)
elif img_options:
    selected_image = os.path.join(IMAGE_FILE_PATH, img_options)
else:
    st.error("No image available. Upload a file or add examples to the 'inputs' folder.")
    st.stop()

st.subheader('Classified Image')

image_size = st.slider('Image Size', 400, 1000, value=400, step=100)

# selected_image already includes the IMAGE_FILE_PATH prefix
st.image(selected_image, width=image_size)

st.subheader('Classification')

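# Start Spark, build the pipeline for the selected model, and classify the image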
spark = init_spark()
pipeline = create_pipeline(model)  # lowercase name avoids shadowing the imported Pipeline class
output = fit_data(pipeline, selected_image)

st.markdown(f'This image has been classified as: **{output}**')