AyoubChLin committed on
Commit
28d65ee
·
verified ·
1 Parent(s): 8873319

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -12
app.py CHANGED
@@ -1,15 +1,11 @@
1
  import streamlit as st
2
  from keras.models import load_model
3
- import numpy as np
4
- import tensorflow as tf
5
  import nltk
6
  import re
7
  from nltk.corpus import stopwords
8
  from nltk.tokenize import TweetTokenizer
9
- from nltk.tokenize import word_tokenize
10
  from tensorflow.keras.preprocessing.text import Tokenizer
11
  from tensorflow.keras.preprocessing.sequence import pad_sequences
12
-
13
  import subprocess
14
 
15
  # Command to execute
@@ -24,8 +20,9 @@ except subprocess.CalledProcessError as e:
24
 
25
  # Load the LSTM model
26
  model_path = "model.h5" # Set your model path here
27
- lstm_model = load_lstm_model(model_path)
28
 
 
 
29
 
30
  def clean_text(text):
31
  # Remove stopwords
@@ -57,7 +54,6 @@ def clean_text(text):
57
 
58
  return text
59
 
60
-
61
  def preprocess_text(text):
62
  # Clean the text
63
  cleaned_text = clean_text(text)
@@ -70,13 +66,8 @@ def preprocess_text(text):
70
 
71
  return padded_sequences
72
 
73
- # Function to load the saved LSTM model
74
- @st.cache(allow_output_mutation=True)
75
- def load_lstm_model(model_path):
76
- return load_model(model_path)
77
-
78
  # Function to predict hate speech
79
- def predict_hate_speech(text):
80
  # Preprocess the text
81
  padded_sequences = preprocess_text(text)
82
  prediction = lstm_model.predict(padded_sequences)
@@ -91,6 +82,8 @@ def main():
91
 
92
  if st.button("Detect Hate Speech"):
93
  if input_text:
 
 
94
  # Predict hate speech
95
  prediction = predict_hate_speech(input_text, lstm_model)
96
  if prediction > 0.5:
 
1
  import streamlit as st
2
  from keras.models import load_model
 
 
3
  import nltk
4
  import re
5
  from nltk.corpus import stopwords
6
  from nltk.tokenize import TweetTokenizer
 
7
  from tensorflow.keras.preprocessing.text import Tokenizer
8
  from tensorflow.keras.preprocessing.sequence import pad_sequences
 
9
  import subprocess
10
 
11
  # Command to execute
 
20
 
21
  # Load the LSTM model
22
  model_path = "model.h5" # Set your model path here
 
23
 
24
+ def load_lstm_model(model_path):
25
+ return load_model(model_path)
26
 
27
  def clean_text(text):
28
  # Remove stopwords
 
54
 
55
  return text
56
 
 
57
  def preprocess_text(text):
58
  # Clean the text
59
  cleaned_text = clean_text(text)
 
66
 
67
  return padded_sequences
68
 
 
 
 
 
 
69
  # Function to predict hate speech
70
+ def predict_hate_speech(text, lstm_model):
71
  # Preprocess the text
72
  padded_sequences = preprocess_text(text)
73
  prediction = lstm_model.predict(padded_sequences)
 
82
 
83
  if st.button("Detect Hate Speech"):
84
  if input_text:
85
+ # Load the model
86
+ lstm_model = load_lstm_model(model_path)
87
  # Predict hate speech
88
  prediction = predict_hate_speech(input_text, lstm_model)
89
  if prediction > 0.5: