Spaces:
Build error
Upload 4 files
- app1.py +80 -0
- model.pkl +3 -0
- requirements.txt +7 -0
- vectorizer.pkl +3 -0
app1.py
ADDED
@@ -0,0 +1,80 @@
import streamlit as st
import pickle
import string
from nltk.corpus import stopwords
import nltk
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
nltk.download('punkt')      # tokenizer data required by nltk.word_tokenize
nltk.download('stopwords')  # stopword list used in transform_text
# Function to preprocess the input text
def transform_text(text):
    text = text.lower()  # Convert to lowercase
    text = nltk.word_tokenize(text)  # Tokenize the text
    y = []

    # Keep only alphanumeric tokens
    for i in text:
        if i.isalnum():
            y.append(i)

    text = y[:]
    y.clear()

    # Removing stopwords and punctuation
    for i in text:
        if i not in stopwords.words('english') and i not in string.punctuation:
            y.append(i)

    text = y[:]
    y.clear()

    # Performing stemming
    for i in text:
        y.append(ps.stem(i))

    return " ".join(y)  # Join the list into a single string with spaces


# Load the saved models (TF-IDF vectorizer and the classification model)
tfidf = pickle.load(open('vectorizer.pkl', 'rb'))
model = pickle.load(open('model.pkl', 'rb'))

# Setting up the main title and description
st.title("📧 Email/SMS Spam Classifier")
st.write("""
### Enter a message to determine whether it's Spam or Not Spam.
This classifier uses **natural language processing (NLP)** techniques to preprocess and predict based on your input.
""")

# Input text field for the user to enter the message
st.write("#### Message Input:")
input_sms = st.text_area("Type or paste your message here", height=150)

# Add a button to trigger the classification
if st.button("🔍 Classify Message"):
    if input_sms.strip():  # Ensure there's text in the input
        ## 1. Preprocess the input text
        with st.spinner('Processing...'):
            transformed_sms = transform_text(input_sms)

        ## 2. Vectorize the transformed text
        vector_input = tfidf.transform([transformed_sms])

        ## 3. Predict the label (Spam or Not Spam)
        result = model.predict(vector_input)[0]

        ## 4. Display the result with an appropriate color and message
        if result == 1:
            st.error("🔴 This message is classified as **Spam**.")
        else:
            st.success("🟢 This message is classified as **Not Spam**.")
    else:
        st.warning("Please enter a valid message to classify.")

# Footer with a reference to the classifier and author
st.markdown("""
---
Developed using **Streamlit** and **NLP techniques**.<br>
**Author**: **Aditya Yadav**
""", unsafe_allow_html=True)
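Before deploying, it can help to sanity-check the two pickled artifacts outside Streamlit. Below is a minimal sketch, assuming vectorizer.pkl and model.pkl sit in the working directory and that the model returns 1 for spam; the sample message is purely illustrative and is passed in raw rather than through transform_text, so it only confirms the pipeline runs end to end.

import pickle

# Load the same artifacts the Streamlit app uses
tfidf = pickle.load(open('vectorizer.pkl', 'rb'))
model = pickle.load(open('model.pkl', 'rb'))

# Throwaway example message; in the app this text would first go through
# transform_text before being vectorized.
sample = "congratulations you have won a free prize call now"
vec = tfidf.transform([sample])
print(model.predict(vec)[0])  # assumption: 1 = spam, 0 = not spam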
model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a73cb686f2ea89fa7f52929dbae3d626d21d623acd8944641a44ee28950fefc4
size 215264
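Note that the committed model.pkl is a Git LFS pointer, not the pickle itself: the oid is the SHA-256 of the real binary. Once the actual file has been fetched (for example via git-lfs), it can be checked against the pointer. A small sketch, assuming the downloaded file has replaced the pointer in place:

import hashlib

# SHA-256 recorded in the LFS pointer above
expected = "a73cb686f2ea89fa7f52929dbae3d626d21d623acd8944641a44ee28950fefc4"
with open("model.pkl", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print("OK" if digest == expected else "mismatch (file may still be an LFS pointer)")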
requirements.txt
ADDED
@@ -0,0 +1,7 @@
nltk==3.7
streamlit==1.12.0
scikit-learn==1.1.1
# pickle  (standard library; not installable via pip)
# string  (standard library; not installable via pip)

# Add any other dependencies here
vectorizer.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a6ca10a0e105f8181b41c391dd2265bd24ab02575fc986ef5bf6b76b30638df
size 131964
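For reference, a vectorizer/model pair like vectorizer.pkl and model.pkl is typically produced offline and then uploaded. The sketch below is only an assumption about how that training step might look: the dataset file name, the 'transformed_text' and 'target' columns, max_features=3000, and the MultinomialNB classifier are all hypothetical, since the actual training code is not part of this commit.

import pickle
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Hypothetical dataset: text already preprocessed with transform_text,
# with 'target' labels where 1 = spam and 0 = not spam.
df = pd.read_csv('spam_dataset.csv')

tfidf = TfidfVectorizer(max_features=3000)  # max_features is an assumption
X = tfidf.fit_transform(df['transformed_text'])
y = df['target'].values

model = MultinomialNB()  # classifier choice is an assumption
model.fit(X, y)

# Serialize exactly the two artifacts app1.py expects to load
pickle.dump(tfidf, open('vectorizer.pkl', 'wb'))
pickle.dump(model, open('model.pkl', 'wb'))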