peterkros committed
Commit 0f6885b · 1 Parent(s): b9a616c

Update app.py

Files changed (1)
  1. app.py +61 -39
app.py CHANGED
@@ -27,46 +27,68 @@ markdown_text = """
 - Input one budget line per time.
 - Accuracy of the model is ~88%.
 """
-# HTML formatted table
 html_table = """
-<table>
-<tr>
-<th>Epoch</th>
-<th>Training Loss</th>
-<th>Validation Loss</th>
-<th>Accuracy</th>
-</tr>
-<tr>
-<td>1</td>
-<td>No log</td>
-<td>2.095209</td>
-<td>0.340764</td>
-</tr>
-<tr>
-<td>2</td>
-<td>No log</td>
-<td>1.419945</td>
-<td>0.662420</td>
-</tr>
-<tr>
-<td>3</td>
-<td>No log</td>
-<td>0.683810</td>
-<td>0.850318</td>
-</tr>
-<tr>
-<td>4</td>
-<td>No log</td>
-<td>0.460408</td>
-<td>0.872611</td>
-</tr>
-<tr>
-<td>5</td>
-<td>No log</td>
-<td>0.422096</td>
-<td>0.888535</td>
-</tr>
-</table>
+<h2 style="text-align: center;">COFOG Budget Classification</h2>
+<p style='text-align: justify'>
+This classifier was developed using the pre-trained BERT
+(Bidirectional Encoder Representations from Transformers) model
+in its uncased configuration, fine-tuned on a manually labeled
+dataset of over 1,500 budget line items extracted from
+various budgetary documents. To balance the data, additional examples
+were generated with GPT-4 where categories were not available
+in budget documents. The model training was executed
+in a Google Colab environment on a Tesla T4 GPU.
+Detailed metrics of the training process are as follows:
+<code>TrainOutput(global_step=395, training_loss=1.1497593360611156,
+metrics={'train_runtime': 650.0119, 'train_samples_per_second':
+9.638, 'train_steps_per_second': 0.608, 'total_flos': 1648509163714560.0,
+'train_loss': 1.1497593360611156, 'epoch': 5.0})</code>. The model
+is designed to predict the primary classification level
+of the Classification of the Functions of Government (COFOG),
+with the predictions from the first level serving as contextual
+input for the subsequent second-level classification. The project
+is conducted exclusively for academic and research
+objectives.
+</p>
+<table style="margin-left: auto; margin-right: auto;">
+<tr>
+<th>Epoch</th>
+<th>Training Loss</th>
+<th>Validation Loss</th>
+<th>Accuracy</th>
+</tr>
+<tr>
+<td>1</td>
+<td>No log</td>
+<td>2.095209</td>
+<td>0.340764</td>
+</tr>
+<tr>
+<td>2</td>
+<td>No log</td>
+<td>1.419945</td>
+<td>0.662420</td>
+</tr>
+<tr>
+<td>3</td>
+<td>No log</td>
+<td>0.683810</td>
+<td>0.850318</td>
+</tr>
+<tr>
+<td>4</td>
+<td>No log</td>
+<td>0.460408</td>
+<td>0.872611</td>
+</tr>
+<tr>
+<td>5</td>
+<td>No log</td>
+<td>0.422096</td>
+<td>0.888535</td>
+</tr>
+</table>
+</div>
 """

 iface = gr.Interface(
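
The added description says first-level COFOG predictions serve as contextual input for the second-level classification, and the diff ends at the `gr.Interface(` call. A minimal sketch of how that two-stage inference could be wired into a Gradio app follows; the model identifiers, the [SEP] conditioning scheme, and the `classify_budget_line` helper are illustrative assumptions, not the code this commit actually contains.

# Sketch: two-stage COFOG classification served through Gradio.
# Model ids, label handling, and classify_budget_line are assumptions
# for illustration; the real app.py in this commit defines its own.
import gradio as gr
from transformers import pipeline

# Stage 1: predict the top-level COFOG function from a single budget line.
level1_clf = pipeline("text-classification", model="path/to/cofog-level1-bert")

# Stage 2: predict the sub-function, conditioning on the level-1 label
# by prepending it to the input text (assumed conditioning scheme).
level2_clf = pipeline("text-classification", model="path/to/cofog-level2-bert")

def classify_budget_line(text: str) -> str:
    """Classify one budget line at a time, as the app's notes instruct."""
    level1 = level1_clf(text)[0]            # e.g. {'label': 'Health', 'score': 0.91}
    conditioned = f"{level1['label']} [SEP] {text}"
    level2 = level2_clf(conditioned)[0]
    return (
        f"Level 1: {level1['label']} ({level1['score']:.2%})\n"
        f"Level 2: {level2['label']} ({level2['score']:.2%})"
    )

iface = gr.Interface(
    fn=classify_budget_line,
    inputs=gr.Textbox(lines=2, label="Budget line"),
    outputs=gr.Textbox(label="COFOG classification"),
    title="COFOG Budget Classification",
)

if __name__ == "__main__":
    iface.launch()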