Update pages/21_NLP_Transformer.py
pages/21_NLP_Transformer.py  +34 -25
--- a/pages/21_NLP_Transformer.py
+++ b/pages/21_NLP_Transformer.py
@@ -128,36 +128,45 @@ scheduler = get_linear_schedule_with_warmup(
 loss_fn = torch.nn.CrossEntropyLoss().to(device)
 model = model.to(device)
 
-#
-
-
-
-
-
-
-
-
-
-
+# Streamlit app
+st.title("Sentiment Analysis with BERT")
+st.write("""
+This application allows you to train a BERT model for sentiment analysis on the IMDb dataset.
+You can input a movie review and the model will predict whether the sentiment is positive or negative.
+""")
+
+if st.button("Train Model"):
+    with st.spinner("Training the model..."):
+        # Training loop
+        for epoch in range(EPOCHS):
+            train_acc, train_loss = train_epoch(
+                model,
+                train_data_loader,
+                loss_fn,
+                optimizer,
+                device,
+                scheduler,
+                len(train_df)
+            )
 
-
-
+            st.write(f'Epoch {epoch + 1}/{EPOCHS}')
+            st.write(f'Train loss {train_loss} accuracy {train_acc}')
 
-
-
-
-
-
-
-
+            val_acc, val_loss = eval_model(
+                model,
+                test_data_loader,
+                loss_fn,
+                device,
+                len(test_df)
+            )
 
-
+            st.write(f'Val loss {val_loss} accuracy {val_acc}')
 
-# Save the model
-model.save_pretrained('bert-sentiment-model')
-tokenizer.save_pretrained('bert-sentiment-model')
+        # Save the model
+        model.save_pretrained('bert-sentiment-model')
+        tokenizer.save_pretrained('bert-sentiment-model')
+        st.success("Model training complete!")
 
-# Streamlit app
 model = BertForSequenceClassification.from_pretrained('bert-sentiment-model')
 tokenizer = BertTokenizer.from_pretrained('bert-sentiment-model')
 model = model.eval()
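The hunk calls train_epoch and eval_model, which are defined earlier in the file and not visible here, as are EPOCHS, the data loaders, train_df/test_df, the optimizer, and the scheduler. A minimal sketch of helpers with matching signatures and return values, assuming the data loaders yield dicts with 'input_ids', 'attention_mask', and 'targets' keys (an assumption, since the dataset class is not in the hunk):

import numpy as np
import torch

def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
    # One pass over the training data; returns (accuracy, mean loss),
    # matching the (train_acc, train_loss) unpacking in the diff.
    model = model.train()
    losses = []
    correct = 0
    for batch in data_loader:
        input_ids = batch['input_ids'].to(device)          # batch keys are an assumption
        attention_mask = batch['attention_mask'].to(device)
        targets = batch['targets'].to(device)

        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        loss = loss_fn(outputs.logits, targets)

        correct += torch.sum(torch.argmax(outputs.logits, dim=1) == targets).item()
        losses.append(loss.item())

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

    return correct / n_examples, np.mean(losses)


def eval_model(model, data_loader, loss_fn, device, n_examples):
    # Evaluation pass with gradients disabled; returns (accuracy, mean loss).
    model = model.eval()
    losses = []
    correct = 0
    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            targets = batch['targets'].to(device)

            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            loss = loss_fn(outputs.logits, targets)

            correct += torch.sum(torch.argmax(outputs.logits, dim=1) == targets).item()
            losses.append(loss.item())

    return correct / n_examples, np.mean(losses)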
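The hunk ends right after the saved model and tokenizer are reloaded in eval mode, before any inference UI. Since the added description says the user can type a movie review and get a positive/negative prediction, a sketch of what that step could look like follows; the widget labels, max_length value, and label ordering are assumptions, not taken from the file.

# streamlit (as st), torch, and the reloaded model/tokenizer are already in scope above.
review_text = st.text_area("Enter a movie review")         # widget label is an assumption
if st.button("Predict Sentiment") and review_text:          # button label is an assumption
    encoding = tokenizer(
        review_text,
        truncation=True,
        padding='max_length',
        max_length=256,          # the page's real max length is not shown in the hunk
        return_tensors='pt'
    )
    with torch.no_grad():
        # from_pretrained() returns a CPU model, so the input tensors stay on CPU here
        outputs = model(
            input_ids=encoding['input_ids'],
            attention_mask=encoding['attention_mask']
        )
    prediction = torch.argmax(outputs.logits, dim=1).item()
    st.write("Positive" if prediction == 1 else "Negative")  # label order is an assumption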