import streamlit as st
import sparknlp
import os
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)
# CSS for styling
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)
@st.cache_resource
def init_spark():
    """Start (or reuse) a Spark NLP session."""
    return sparknlp.start()
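# Note: st.cache_resource caches the returned session for the whole server
# process, so the JVM startup cost of sparknlp.start() is paid once rather
# than on every Streamlit rerun.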
@st.cache_resource
def create_pipeline(model):
    # Wrap raw input text in a document annotation
    document_assembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")

    # Split the document into sentences
    sentence_detector = SentenceDetector() \
        .setInputCols(["document"]) \
        .setOutputCol("sentence")

    # Split each sentence into tokens
    tokenizer = Tokenizer() \
        .setInputCols(["sentence"]) \
        .setOutputCol("token")

    # Pretrained 300-dimensional Urdu word embeddings
    word_embeddings = WordEmbeddingsModel.pretrained('urduvec_140M_300d', 'ur') \
        .setInputCols(["sentence", "token"]) \
        .setOutputCol("word_embeddings")

    # Average word vectors into a single sentence embedding
    sentence_embeddings = SentenceEmbeddings() \
        .setInputCols(["sentence", "word_embeddings"]) \
        .setOutputCol("sentence_embeddings") \
        .setPoolingStrategy("AVERAGE")

    # Sentiment classifier chosen in the sidebar
    classifier = SentimentDLModel.pretrained(model, 'ur') \
        .setInputCols(['sentence_embeddings']) \
        .setOutputCol('sentiment')

    nlp_pipeline = Pipeline(stages=[
        document_assembler,
        sentence_detector,
        tokenizer,
        word_embeddings,
        sentence_embeddings,
        classifier
    ])
    return nlp_pipeline
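# Pipeline flow: text -> document -> sentence -> token -> word_embeddings
# -> sentence_embeddings -> sentiment. Each annotator reads the columns named
# in setInputCols and writes the column named in setOutputCol.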
def fit_data(pipeline, data):
    # Fitting on an empty DataFrame materializes the pretrained stages;
    # LightPipeline then annotates plain Python strings without Spark
    # DataFrame overhead.
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    model = LightPipeline(pipeline_model)
    results = model.fullAnnotate(data)[0]
    return results['sentiment'][0].result
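# fullAnnotate returns one result dict per input string; results['sentiment']
# holds Annotation objects whose .result field is the predicted label
# (e.g. 'positive' or 'negative').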
# Set up the page layout
st.markdown('<div class="main-title">State-of-the-Art Urdu Sentiment Detection with Spark NLP</div>', unsafe_allow_html=True)
# Sidebar content
model = st.sidebar.selectbox(
"Choose the pretrained model",
["sentimentdl_urduvec_imdb"],
help="For more info about the models visit: https://sparknlp.org/models"
)
# Reference notebook link in sidebar
link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/public/SENTIMENT_UR.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)
# Load example texts for the selected model
folder_path = f"inputs/{model}"
examples = [
    lines[1].strip()
    for filename in os.listdir(folder_path)
    if filename.endswith('.txt')
    for lines in [open(os.path.join(folder_path, filename), 'r', encoding='utf-8').readlines()]
    if len(lines) >= 2
]
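# Assumption about the data layout: each inputs/<model>/*.txt file carries a
# heading on line 1 and the Urdu sample text on line 2; files with fewer than
# two lines are skipped.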
selected_text = st.selectbox("Select a sample", examples)
custom_input = st.text_input("Try it for yourself!")
if custom_input:
    selected_text = custom_input
st.subheader('Selected Text')
st.write(selected_text)
# Initialize Spark and create pipeline
spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, selected_text)
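# The SentimentDL model is expected to emit 'positive' or 'negative' (some
# checkpoints abbreviate to 'pos'/'neg'), so both spellings are checked below.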
# Display output sentence
if output.lower() in ['pos', 'positive']:
    st.markdown("""<h3>This seems like a <span style="color: green">{}</span> text. <span style="font-size:35px;">😃</span></h3>""".format('positive'), unsafe_allow_html=True)
elif output.lower() in ['neg', 'negative']:
    st.markdown("""<h3>This seems like a <span style="color: red">{}</span> text. <span style="font-size:35px;">😠</span></h3>""".format('negative'), unsafe_allow_html=True)