Update app.py
app.py CHANGED
@@ -0,0 +1,65 @@
+import gradio as gr
+import pandas as pd
+import numpy as np
+import tensorflow as tf
+import matplotlib.pyplot as plt
+import seaborn as sns
+from nltk.corpus import stopwords
+from wordcloud import WordCloud, STOPWORDS
+import re
+import nltk
+nltk.download('punkt')
+from sklearn.model_selection import train_test_split
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.preprocessing import LabelEncoder
+from tensorflow.keras.models import load_model
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+from tensorflow.keras.preprocessing.text import Tokenizer
+from tensorflow.keras.initializers import Orthogonal
+
+# Load the dataset
+df = pd.read_csv("Twitter_Data.csv")
+
+# Load the model with custom objects
+custom_objects = {
+    'Orthogonal': Orthogonal,
+    # Add other custom objects if needed
+}
+
+model = load_model('sentiment_analysis_model.h5', custom_objects=custom_objects)
+
+# Preprocess the data
+def preprocess_text(text):
+    text = re.sub(r'http\S+', '', text)  # remove URLs
+    text = re.sub(r'[^A-Za-z0-9 ]+', '', text)  # remove special characters
+    text = text.lower()  # convert to lowercase
+    return text
+
+df['clean_text'] = df['text'].apply(preprocess_text)
+
+# Tokenization and padding
+tokenizer = Tokenizer()
+tokenizer.fit_on_texts(df['clean_text'])
+max_length = 100
+vocab_size = len(tokenizer.word_index) + 1
+
+def predict_sentiment(text):
+    text = preprocess_text(text)
+    sequence = tokenizer.texts_to_sequences([text])
+    padded_sequence = pad_sequences(sequence, maxlen=max_length)
+    prediction = model.predict(padded_sequence)
+    sentiment = np.argmax(prediction)
+    sentiment_labels = {0: 'Negative', 1: 'Neutral', 2: 'Positive'}
+    return sentiment_labels[sentiment]
+
+# Gradio interface
+iface = gr.Interface(
+    fn=predict_sentiment,
+    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
+    outputs="text",
+    title="Sentiment Analysis",
+    description="Enter text to predict its sentiment"
+)
+
+if __name__ == "__main__":
+    iface.launch()
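
As a quick sanity check outside the Gradio UI, the predict_sentiment function defined above can be called directly. This is a minimal sketch, assuming Twitter_Data.csv and sentiment_analysis_model.h5 are present so the module imports without error; the sample strings are hypothetical and only illustrate the call:

# Hypothetical smoke test for the prediction pipeline (not part of app.py)
from app import predict_sentiment

for sample in ["I love this product!", "This is the worst service ever."]:
    # Each call preprocesses the text, tokenizes and pads it to max_length=100,
    # then maps argmax of the model output through {0: Negative, 1: Neutral, 2: Positive}.
    print(sample, "->", predict_sentiment(sample))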