import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.text import text_to_word_sequence
from tensorflow.keras.preprocessing.sequence import pad_sequences
import gradio as gr
# Load the IMDb dataset
imdb = tf.keras.datasets.imdb
vocab_size = 10000
maxlen = 100
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocab_size)
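# Truncate/pad every review to maxlen tokens so the inputs have a uniform shape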
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)
# Define the model
model = models.Sequential([
    layers.Embedding(vocab_size, 16, input_length=maxlen),
    layers.GlobalAveragePooling1D(),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])
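# Compile and train; the held-out test split is reused here as validation data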
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, batch_size=512, validation_data=(X_test, y_test), verbose=1)
# Save the model
model.save("sentiment_analysis_model.h5")
# Word-to-index mapping used by the IMDb dataset (indices in the loaded data are
# offset by 3; 1 = start token, 2 = out-of-vocabulary, 0 = padding)
word_index = imdb.get_word_index()

# Function to predict the sentiment of a raw text string
def predict_sentiment(text):
    # Encode the text with the same vocabulary the model was trained on
    tokens = text_to_word_sequence(text)
    encoded = [1] + [
        word_index[word] + 3 if word in word_index and word_index[word] + 3 < vocab_size else 2
        for word in tokens
    ]
    padded_sequence = pad_sequences([encoded], maxlen=maxlen)
    prediction = float(model.predict(padded_sequence)[0][0])
    sentiment = "Positive" if prediction >= 0.5 else "Negative"
    # Report confidence in the predicted class, not the raw positive-class probability
    confidence = round(prediction if prediction >= 0.5 else 1 - prediction, 4)
    return sentiment, confidence
# Gradio Interface
def gradio_predict(text):
    sentiment, confidence = predict_sentiment(text)
    return f"Sentiment: {sentiment}, Confidence: {confidence:.4f}"
# Create Gradio Interface
interface = gr.Interface(fn=gradio_predict,
                         inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
                         outputs="text",
                         title="Sentiment Analysis",
                         description="Enter a movie review or any text to analyze its sentiment.")
# Launch the Gradio Interface
interface.launch()