File size: 6,116 Bytes
dcc0fda
 
 
 
 
 
 
83f1609
f8c9f63
dcb382f
 
 
 
 
 
 
 
f8c9f63
f6969ae
dcb382f
f6969ae
dcc0fda
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83f1609
 
 
 
dcc0fda
 
 
 
 
83f1609
 
 
 
 
 
 
dcc0fda
 
 
 
3229f04
dcc0fda
 
 
 
 
3229f04
 
 
 
 
dcc0fda
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
import streamlit as st 
from time import sleep
from stqdm import stqdm
import pandas as pd
from transformers import pipeline
import json

from datasets import load_dataset

from datasets import load_dataset




# Map friendly split names to the lecture transcript files hosted in the
# "domro11/lectures" dataset repository on the Hugging Face Hub.
data_files = {
    "DL1": "CORR_TEXT_ZOOM_DL_1.txt",
    "DL3": "CORR_TEXT_ZOOM_DL_3.txt",
    "DL5": "CORR_TEXT_ZOOM_DL_5.txt",
    "DL7": "CORR_TEXT_ZOOM_DL_7.txt",
}

# Load the dataset exactly once, keeping the split mapping above.
# (The original code called load_dataset a second time without data_files,
# which silently overwrote `dataset` and discarded the named splits.)
dataset = load_dataset("domro11/lectures", data_files=data_files)



def draw_all(
    key,
    plot=False,
):
    """Render the static app-description panel.

    Args:
        key: identifier for the rendering context (currently unused by the body).
        plot: unused placeholder flag, kept for interface compatibility.
    """
    # Bind the markdown once so the render call below stays short.
    description_markdown = """
        # NLP Web App
        
        This Natural Language Processing Based Web App can do anything u can imagine with Text. 😱 
        
        This App is built using pretrained transformers which are capable of doing wonders with the Textual data.
        
        ```python
        # Key Features of this App.
        1. Advanced Text Summarizer
        2. Sentiment Analysis
        3. Question Answering
        4. Text Completion
       
        ```
        """
    st.write(description_markdown)

    

# Show the static app-description panel inside the Streamlit sidebar.
with st.sidebar:
    draw_all("sidebar")


#main function that holds all the options
def main():
    """Entry point: render the task menu and dispatch to the chosen NLP tool."""
    st.title("NLP IE Web App")
    menu = ["--Select--", "Summarizer",
            "Sentiment Analysis", "Question Answering", "Text Completion"]
    choice = st.sidebar.selectbox("What task would you like to do?", menu)
    if choice == "--Select--":
        _show_welcome()
    elif choice == "Summarizer":
        _summarizer_page()
    elif choice == "Sentiment Analysis":
        _sentiment_page()
    elif choice == "Question Answering":
        _question_answering_page()
    elif choice == "Text Completion":
        _text_completion_page()


def _capitalize_sentences(text):
    """Return *text* with each '.'-separated fragment stripped and capitalized."""
    return '. '.join(part.strip().capitalize() for part in text.split('.'))


def _show_welcome():
    """Landing page shown before any task is selected."""
    st.write("""
             
             Welcome to the Web App of Data Dynamos. As an IE student of the Master of Business Analytics and Big Data you have the opportunity to
             do anything with your lectures you like
    """)
    st.write("""
             
            Never heard of NLP? No way! Natural Language Processing (NLP) is a computational technique
            to process human language in all of its complexity
    """)
    st.write("""
             
             NLP is a vital discipline in Artificial Intelligence and keeps growing
    """)
    st.image('banner_image.jpg')


def _summarizer_page():
    """Summarize one of the lecture transcripts from the loaded dataset."""
    st.subheader("Text Summarization")
    st.write(" Enter the Text you want to summarize !")
    # `dataset` is a DatasetDict keyed by split name.  The original code
    # indexed a non-existent 'documents' split and then an undefined
    # `documents` variable, both of which raised at runtime.
    document_names = sorted(dataset.keys())
    document_to_read = st.selectbox("Select a document to read", document_names)
    # Each split is a table with a 'text' column (one row per line of the
    # source .txt file) — join the rows into a single string to summarize.
    raw_text = " ".join(dataset[document_to_read]["text"])
    num_words = st.number_input("Enter Number of Words in Summary")

    if raw_text != "" and num_words:
        num_words = int(num_words)
        summarizer = pipeline('summarization', model="philschmid/bart-large-cnn-samsum")
        # Keep max_length >= min_length; the original hard-coded max_length=50
        # and crashed whenever the requested minimum exceeded it.
        summary = summarizer(raw_text, min_length=num_words,
                             max_length=max(50, num_words))
        # pipeline() already returns plain dicts — no json round-trip needed.
        result_summary = _capitalize_sentences(summary[0]['summary_text'])
        st.write(f"Here's your Summary : {result_summary}")


def _sentiment_page():
    """Classify the sentiment of user-entered text."""
    st.subheader("Sentiment Analysis")
    # Default sentiment-analysis pipeline (labels observed: POSITIVE/NEGATIVE,
    # with a NEUTRAL branch kept from the original for other models).
    sentiment_analysis = pipeline("sentiment-analysis")
    st.write(" Enter the Text below To find out its Sentiment !")

    raw_text = st.text_area("Your Text", "Enter Text Here")
    if raw_text != "Enter Text Here":
        result = sentiment_analysis(raw_text)[0]
        sentiment = result['label']
        # Cosmetic progress bar while the result is prepared for display.
        for _ in stqdm(range(50), desc="Please wait a bit. The model is fetching the results !!"):
            sleep(0.1)
        if sentiment == "POSITIVE":
            st.write("""# This text has a Positive Sentiment.  🤗""")
        elif sentiment == "NEGATIVE":
            st.write("""# This text has a Negative Sentiment. 😤""")
        elif sentiment == "NEUTRAL":
            st.write("""# This text seems Neutral ... 😐""")


def _question_answering_page():
    """Answer a user question against a user-supplied context passage."""
    st.subheader("Question Answering")
    st.write(" Enter the Context and ask the Question to find out the Answer !")
    question_answering = pipeline("question-answering")

    context = st.text_area("Context", "Enter the Context Here")
    question = st.text_area("Your Question", "Enter your Question Here")

    # Compare against the widget's actual placeholder text; the original
    # checked "Enter Text Here", which never matched the context default.
    if context != "Enter the Context Here" and question != "Enter your Question Here":
        result = question_answering(question=question, context=context)
        generated_text = _capitalize_sentences(result['answer'])
        st.write(f" Here's your Answer :\n {generated_text}")


def _text_completion_page():
    """Continue user-entered text with a generative model."""
    st.subheader("Text Completion")
    st.write(" Enter the incomplete Text to complete it automatically using AI !")
    text_generation = pipeline("text-generation")
    message = st.text_area("Your Text", "Enter the Text to complete")

    if message != "Enter the Text to complete":
        generator = text_generation(message)
        generated_text = _capitalize_sentences(generator[0]['generated_text'])
        st.write(f" Here's your Generated Text :\n   {generated_text}")

    #main function to run
if __name__ == '__main__':
	main()