kowsiknd committed on
Commit
338d8de
·
1 Parent(s): b3aa083
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -26,9 +26,9 @@ Model was trained on twitter dataset ENCASEH2020 from Founta, A.M et. al. (2018)
26
  giving better results with the least number of parameters. The model was trained for 10 epochs with a batch size of 32 and the AdamW optimizer with a learning rate of 1e-2 and cross-entropy loss.
27
  """)
28
 
29
- st.image("./images/train_val_accuracy.png [4]", caption="Train and Validation Accuracy", use_column_width=True)
30
- st.image("./images/train_test_scores.png [4]", caption="Classification Report", use_column_width=True)
31
- st.image("./images/confusion_matrix.png [4]", caption="Confusion Matrix", use_column_width=True)
32
 
33
  st.subheader("References")
34
  st.markdown("1. [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)")
 
26
  giving better results with the least number of parameters. The model was trained for 10 epochs with a batch size of 32 and the AdamW optimizer with a learning rate of 1e-2 and cross-entropy loss.
27
  """)
28
 
29
+ st.image("./images/train_val_accuracy.png", caption="Train and Validation Accuracy", use_column_width=True)
30
+ st.image("./images/train_test_scores.png", caption="Classification Report", use_column_width=True)
31
+ st.image("./images/confusion_matrix.png", caption="Confusion Matrix", use_column_width=True)
32
 
33
  st.subheader("References")
34
  st.markdown("1. [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)")