Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +1 -0
- .gitignore +166 -0
- .pre-commit-config.yaml +17 -0
- LICENSE +21 -0
- Makefile +15 -0
- NewsClassifier.egg-info/PKG-INFO +6 -0
- NewsClassifier.egg-info/SOURCES.txt +16 -0
- NewsClassifier.egg-info/dependency_links.txt +1 -0
- NewsClassifier.egg-info/requires.txt +34 -0
- NewsClassifier.egg-info/top_level.txt +1 -0
- README.md +4 -8
- app.py +50 -0
- artifacts/model.pt +3 -0
- dataset/preprocessed/test.csv +0 -0
- dataset/preprocessed/train.csv +0 -0
- dataset/raw/news_dataset.csv +3 -0
- docs/index.md +35 -0
- docs/newsclassifier/config.md +1 -0
- docs/newsclassifier/data.md +1 -0
- docs/newsclassifier/inference.md +1 -0
- docs/newsclassifier/models.md +1 -0
- docs/newsclassifier/train.md +1 -0
- docs/newsclassifier/tune.md +1 -0
- docs/newsclassifier/utils.md +1 -0
- logs/error.log +0 -0
- logs/info.log +186 -0
- mkdocs.yml +20 -0
- newsclassifier/__init__.py +0 -0
- newsclassifier/__pycache__/__init__.cpython-310.pyc +0 -0
- newsclassifier/__pycache__/config.cpython-310.pyc +0 -0
- newsclassifier/__pycache__/data.cpython-310.pyc +0 -0
- newsclassifier/__pycache__/models.cpython-310.pyc +0 -0
- newsclassifier/__pycache__/predict.cpython-310.pyc +0 -0
- newsclassifier/__pycache__/serve.cpython-310.pyc +0 -0
- newsclassifier/config/__init__.py +0 -0
- newsclassifier/config/__pycache__/__init__.cpython-310.pyc +0 -0
- newsclassifier/config/__pycache__/config.cpython-310.pyc +0 -0
- newsclassifier/config/config.py +265 -0
- newsclassifier/config/sweep_config.yaml +17 -0
- newsclassifier/data.py +197 -0
- newsclassifier/inference.py +54 -0
- newsclassifier/models.py +60 -0
- newsclassifier/predict.py +32 -0
- newsclassifier/train.py +151 -0
- newsclassifier/tune.py +85 -0
- newsclassifier/utils.py +20 -0
- notebooks/eda.ipynb +257 -0
- notebooks/newsclassifier-roberta-base-wandb-track-sweep.ipynb +1035 -0
- requirements.txt +34 -0
- setup.py +23 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+dataset/raw/news_dataset.csv filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,166 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# make
+Makefile
+
+# artifacts
+artifacts/
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,17 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: trailing-whitespace
+        exclude: "docs/index.md"
+      - id: check-yaml
+  - repo: local
+    hooks:
+      - id: style
+        name: Style
+        entry: make
+        args: ["style"]
+        language: system
+        pass_filenames: false
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Manish Wahale
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
Makefile
ADDED
@@ -0,0 +1,15 @@
+ifeq ($(OS), Windows_NT)
+# Styling
+.PHONY: style
+style:
+	black . --line-length 150
+	isort . -rc
+	flake8 . --exit-zero
+else
+# Styling
+.PHONY: style
+style:
+	python3 -m black . --line-length 150
+	python3 -m isort . -rc
+	python3 -m flake8 . --exit-zero
+endif
NewsClassifier.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,6 @@
+Metadata-Version: 2.1
+Name: NewsClassifier
+Version: 1.0
+Author: ManishW
+Author-email: [email protected]
+License-File: LICENSE
NewsClassifier.egg-info/SOURCES.txt
ADDED
@@ -0,0 +1,16 @@
+LICENSE
+README.md
+setup.py
+NewsClassifier.egg-info/PKG-INFO
+NewsClassifier.egg-info/SOURCES.txt
+NewsClassifier.egg-info/dependency_links.txt
+NewsClassifier.egg-info/requires.txt
+NewsClassifier.egg-info/top_level.txt
+newsclassifier/__init__.py
+newsclassifier/data.py
+newsclassifier/inference.py
+newsclassifier/models.py
+newsclassifier/train.py
+newsclassifier/tune.py
+newsclassifier/config/__init__.py
+newsclassifier/config/config.py
NewsClassifier.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@
+
NewsClassifier.egg-info/requires.txt
ADDED
@@ -0,0 +1,34 @@
+aiosignal==1.3.1
+attrs==23.1.0
+certifi==2023.7.22
+charset-normalizer==3.3.1
+click==8.1.7
+colorama==0.4.6
+contourpy==1.1.1
+cycler==0.12.1
+filelock==3.12.4
+fonttools==4.43.1
+frozenlist==1.4.0
+idna==3.4
+jsonschema==4.19.1
+jsonschema-specifications==2023.7.1
+kiwisolver==1.4.5
+matplotlib==3.8.0
+msgpack==1.0.7
+numpy==1.26.1
+packaging==23.2
+pandas==2.1.2
+Pillow==10.1.0
+protobuf==4.24.4
+pyparsing==3.1.1
+python-dateutil==2.8.2
+pytz==2023.3.post1
+PyYAML==6.0.1
+ray==2.7.1
+referencing==0.30.2
+requests==2.31.0
+rpds-py==0.10.6
+seaborn==0.13.0
+six==1.16.0
+tzdata==2023.3
+urllib3==2.0.7
NewsClassifier.egg-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+newsclassifier
README.md
CHANGED
@@ -1,12 +1,8 @@
 ---
-title: News
-
-colorFrom: indigo
-colorTo: purple
+title: News-Classifier
+app_file: app.py
 sdk: gradio
 sdk_version: 4.0.2
-app_file: app.py
-pinned: false
 ---
-
-
+# NewsClassifier
+See docs here: [NewsClassifier Docs](https://ManishW315.github.io/NewsClassifier/)
app.py
ADDED
@@ -0,0 +1,50 @@
+import os
+
+import gradio as gr
+import torch
+from newsclassifier.config.config import Cfg, logger
+from newsclassifier.data import prepare_input
+from newsclassifier.models import CustomModel
+from transformers import RobertaTokenizer
+
+labels = list(Cfg.index_to_class.values())
+
+# load the tokenizer and the trained model weights (CPU only on the Space)
+tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
+model = CustomModel(num_classes=7)
+model.load_state_dict(torch.load(os.path.join(Cfg.artifacts_path, "model.pt"), map_location=torch.device("cpu")))
+
+
+def prediction(text):
+    sample_input = prepare_input(tokenizer, text)
+    input_ids = torch.unsqueeze(sample_input["input_ids"], 0).to("cpu")
+    attention_masks = torch.unsqueeze(sample_input["attention_mask"], 0).to("cpu")
+    test_sample = dict(input_ids=input_ids, attention_mask=attention_masks)
+
+    with torch.no_grad():
+        y_pred_test_sample = model.predict_proba(test_sample)
+        pred_probs = y_pred_test_sample[0]
+
+    return {labels[i]: float(pred_probs[i]) for i in range(len(labels))}
+
+
+title = "NewsClassifier"
+description = "Enter a news headline, and this app will classify it into one of the categories."
+instructions = "Type or paste a news headline in the textbox and press Enter."
+
+iface = gr.Interface(
+    fn=prediction,
+    inputs=gr.Textbox(),
+    outputs=gr.Label(num_top_classes=7),
+    title=title,
+    description=description,
+    examples=[
+        ["Global Smartphone Shipments Will Hit Lowest Point in a Decade, IDC Says"],
+        ["John Wick's First Spinoff is the Rare Prequel That Justifies Its Existence"],
+        ["Research provides a better understanding of how light stimulates the brain"],
+        ["Lionel Messi scores free kick golazo for Argentina in World Cup qualifiers"],
+    ],
+    article=instructions,
+)
+
+iface.launch(share=True)
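Once the Space is up, the same endpoint can be queried programmatically. A minimal sketch using the `gradio_client` package; the Space id and the `/predict` route name are assumptions (Gradio 4.x registers `/predict` by default for a single `gr.Interface`), so substitute the real Space URL or id:

```python
# Sketch: call the running Gradio app from Python.
# Assumes the `gradio_client` package is installed and that the Space id
# below is correct -- it is hypothetical here.
from gradio_client import Client

client = Client("ManishW315/NewsClassifier")  # hypothetical Space id
result = client.predict(
    "Global Smartphone Shipments Will Hit Lowest Point in a Decade, IDC Says",
    api_name="/predict",  # default route for a single gr.Interface
)
print(result)  # label/confidence payload mirroring the gr.Label output
```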
artifacts/model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ad2ee4ee7324989ef530eae760f3cb4a660aaca0bae36469c9ae6723130b83d
+size 498672838
dataset/preprocessed/test.csv
ADDED
The diff for this file is too large to render. See raw diff.

dataset/preprocessed/train.csv
ADDED
The diff for this file is too large to render. See raw diff.
dataset/raw/news_dataset.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98c974915d3871f9fd92985fa2413afb995adb7545e6ee4a036240f3a20abd18
+size 18273585
docs/index.md
ADDED
@@ -0,0 +1,35 @@
+# Welcome to NewsClassifier Docs
+
+For source visit [ManishW315/NewsClassifier](https://github.com/ManishW315/NewsClassifier).
+
+## Project layout
+<pre>
+NewsClassifier
+│
+├───dataset
+│   ├───preprocessed
+│   │       test.csv
+│   │       train.csv
+│   │
+│   └───raw
+│           news_dataset.csv
+│
+├───newsclassifier
+│   │   data.py
+│   │   models.py
+│   │   train.py
+│   │   tune.py
+│   │   inference.py
+│   │   utils.py
+│   │
+│   │
+│   └───config
+│           config.py
+│           sweep_config.yaml
+│
+├───notebooks
+│       eda.ipynb
+│       newsclassifier-roberta-base-wandb-track-sweep.ipynb
+│
+└───test
+</pre>
docs/newsclassifier/config.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.config.config

docs/newsclassifier/data.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.data

docs/newsclassifier/inference.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.inference

docs/newsclassifier/models.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.models

docs/newsclassifier/train.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.train

docs/newsclassifier/tune.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.tune

docs/newsclassifier/utils.md
ADDED
@@ -0,0 +1 @@
+::: newsclassifier.utils
logs/error.log
ADDED
File without changes

logs/info.log
ADDED
@@ -0,0 +1,186 @@
+INFO 2023-11-01 08:36:13,083 [root:data.py:load_dataset:24]
+Loading Data.
+
+INFO 2023-11-01 08:40:59,763 [root:data.py:load_dataset:24]
+Loading Data.
+
+INFO 2023-11-01 08:43:10,163 [root:data.py:load_dataset:24]
+Loading Data.
+
+INFO 2023-11-01 08:44:10,037 [root:data.py:load_dataset:24]
+Loading Data.
+
+INFO 2023-11-01 08:47:58,057 [root:data.py:load_dataset:27]
+Loading Data.
+
+INFO 2023-11-01 08:48:28,766 [root:data.py:load_dataset:27]
+Loading Data.
+
+INFO 2023-11-01 08:49:43,821 [root:data.py:load_dataset:27]
+Loading Data.
+
+INFO 2023-11-01 08:49:46,460 [root:data.py:data_split:105]
+Splitting Data.
+
+INFO 2023-11-01 08:49:46,564 [root:data.py:data_split:116]
+Saving and storing data splits.
+
+INFO 2023-11-02 00:09:13,890 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 00:11:13,522 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 00:23:17,886 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 00:25:53,585 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 00:25:53,642 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 00:30:41,901 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 00:30:41,919 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 00:36:18,514 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 00:36:18,538 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:47:32,805 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:48:36,522 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:48:52,388 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:49:14,171 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:50:10,611 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:50:27,112 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:50:51,887 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:51:44,829 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:52:06,984 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:52:20,660 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:52:33,236 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:53:05,679 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:53:20,561 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:53:29,476 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 10:53:38,528 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 11:01:28,685 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 14:50:33,049 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 14:52:09,259 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 14:53:30,933 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:22:31,654 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:30:09,258 [root:data.py:clean_text:58]
+Cleaning input text.
+
+INFO 2023-11-02 21:30:46,696 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:39:13,401 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:40:13,665 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:44:01,779 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:44:20,110 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:45:52,673 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:48:31,415 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:49:40,642 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:50:42,110 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:55:50,749 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:56:30,951 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:56:47,555 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:56:53,879 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:57:11,729 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:57:14,827 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 21:57:23,501 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:20:57,360 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:25:04,600 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:25:15,152 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:47:41,043 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:47:47,106 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:47:52,655 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
+INFO 2023-11-02 22:47:56,948 [root:data.py:prepare_input:146]
+Tokenizing input text.
+
mkdocs.yml
ADDED
@@ -0,0 +1,20 @@
+site_name: NewsClassifier Docs
+# site_url:
+repo_url: https://github.com/ManishW315/NewsClassifier
+nav:
+  - Home: index.md
+  - newsclassifier:
+      - config: newsclassifier/config.md
+      - data: newsclassifier/data.md
+      - models: newsclassifier/models.md
+      - train: newsclassifier/train.md
+      - tune: newsclassifier/tune.md
+      - inference: newsclassifier/inference.md
+      # - predict: newsclassifier/predict.md
+      # - serve: newsclassifier/serve.md
+      - utils: newsclassifier/utils.md
+theme: readthedocs
+plugins:
+  - mkdocstrings
+watch:
+  - .  # reload docs for any file changes
newsclassifier/__init__.py
ADDED
File without changes

newsclassifier/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (182 Bytes).

newsclassifier/__pycache__/config.cpython-310.pyc
ADDED
Binary file (2.88 kB).

newsclassifier/__pycache__/data.cpython-310.pyc
ADDED
Binary file (6.76 kB).

newsclassifier/__pycache__/models.cpython-310.pyc
ADDED
Binary file (2.45 kB).

newsclassifier/__pycache__/predict.cpython-310.pyc
ADDED
Binary file (1.31 kB).

newsclassifier/__pycache__/serve.cpython-310.pyc
ADDED
Binary file (1.25 kB).

newsclassifier/config/__init__.py
ADDED
File without changes

newsclassifier/config/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes).

newsclassifier/config/__pycache__/config.cpython-310.pyc
ADDED
Binary file (3.24 kB).
newsclassifier/config/config.py
ADDED
@@ -0,0 +1,265 @@
+import logging
+import os
+from dataclasses import dataclass
+from logging.handlers import RotatingFileHandler
+from pathlib import Path
+
+import nltk
+
+from rich.logging import RichHandler
+
+# from nltk.corpus import stopwords
+# nltk.download("stopwords")
+
+
+@dataclass
+class Cfg:
+    STOPWORDS = [
+        "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "you're",
+        "you've", "you'll", "you'd", "your", "yours", "yourself", "yourselves", "he", "him", "his",
+        "himself", "she", "she's", "her", "hers", "herself", "it", "it's", "its", "itself",
+        "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this",
+        "that", "that'll", "these", "those", "am", "is", "are", "was", "were", "be",
+        "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing",
+        "a", "an", "the", "and", "but", "if", "or", "because", "as", "until",
+        "while", "of", "at", "by", "for", "with", "about", "against", "between", "into",
+        "through", "during", "before", "after", "above", "below", "to", "from", "up", "down",
+        "in", "out", "on", "off", "over", "under", "again", "further", "then", "once",
+        "here", "there", "when", "where", "why", "how", "all", "any", "both", "each",
+        "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only",
+        "own", "same", "so", "than", "too", "very", "s", "t", "can", "will",
+        "just", "don", "don't", "should", "should've", "now", "d", "ll", "m", "o",
+        "re", "ve", "y", "ain", "aren", "aren't", "couldn", "couldn't", "didn", "didn't",
+        "doesn", "doesn't", "hadn", "hadn't", "hasn", "hasn't", "haven", "haven't", "isn", "isn't",
+        "ma", "mightn", "mightn't", "mustn", "mustn't", "needn", "needn't", "shan", "shan't", "shouldn",
+        "shouldn't", "wasn", "wasn't", "weren", "weren't", "won", "won't", "wouldn", "wouldn't",
+    ]
+
+    dataset_loc = os.path.join((Path(__file__).parent.parent.parent), "dataset", "raw", "news_dataset.csv")
+    preprocessed_data_path = os.path.join((Path(__file__).parent.parent.parent), "dataset", "preprocessed")
+    sweep_config_path = os.path.join((Path(__file__).parent), "sweep_config.yaml")
+
+    # Logs path
+    logs_path = os.path.join((Path(__file__).parent.parent.parent), "logs")
+    artifacts_path = os.path.join((Path(__file__).parent.parent.parent), "artifacts")
+    model_path = os.path.join((Path(__file__).parent.parent.parent), "artifacts", "model.pt")
+
+    test_size = 0.2
+
+    add_special_tokens = True
+    max_len = 50
+    pad_to_max_length = True
+    truncation = True
+
+    change_config = False
+
+    dropout_pb = 0.5
+    lr = 1e-4
+    lr_redfactor = 0.7
+    lr_redpatience = 4
+    epochs = 10
+    batch_size = 128
+    num_classes = 7
+
+    sweep_run = 10
+
+    index_to_class = {0: "Business", 1: "Entertainment", 2: "Health", 3: "Science", 4: "Sports", 5: "Technology", 6: "Worldwide"}
+
+
+# Create logs folder
+os.makedirs(Cfg.logs_path, exist_ok=True)
+
+# Get root logger
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+# Create handlers
+console_handler = RichHandler(markup=True)
+console_handler.setLevel(logging.INFO)
+
+info_handler = RotatingFileHandler(
+    filename=Path(Cfg.logs_path, "info.log"),
+    maxBytes=10485760,  # 10 MB
+    backupCount=10,
+)
+info_handler.setLevel(logging.INFO)
+
+error_handler = RotatingFileHandler(
+    filename=Path(Cfg.logs_path, "error.log"),
+    maxBytes=10485760,  # 10 MB
+    backupCount=10,
+)
+error_handler.setLevel(logging.ERROR)
+
+# Create formatters
+minimal_formatter = logging.Formatter(fmt="%(message)s")
+detailed_formatter = logging.Formatter(fmt="%(levelname)s %(asctime)s [%(name)s:%(filename)s:%(funcName)s:%(lineno)d]\n%(message)s\n")
+
+# Hook it all up
+console_handler.setFormatter(fmt=minimal_formatter)
+info_handler.setFormatter(fmt=detailed_formatter)
+error_handler.setFormatter(fmt=detailed_formatter)
+logger.addHandler(hdlr=console_handler)
+logger.addHandler(hdlr=info_handler)
+logger.addHandler(hdlr=error_handler)
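Because config.py configures the root logger at import time (Rich console handler plus rotating info/error files), any module in the package gets logging for free by importing the already-wired `logger`. A minimal sketch:

```python
# Sketch: importing config.py is enough to get console + file logging,
# since the handlers are attached at module import.
from newsclassifier.config.config import Cfg, logger

logger.info("Resolved dataset location: %s", Cfg.dataset_loc)  # console + logs/info.log
logger.error("Example error entry.")                           # additionally lands in logs/error.log
```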
newsclassifier/config/sweep_config.yaml
ADDED
@@ -0,0 +1,17 @@
+method: random
+metric:
+  name: val_loss
+  goal: minimize
+parameters:
+  dropout_pb:
+    values: [0.3, 0.4, 0.5]
+  learning_rate:
+    values: [0.0001, 0.001, 0.01]
+  batch_size:
+    values: [32, 64, 128]
+  lr_reduce_factor:
+    values: [0.5, 0.6, 0.7, 0.8]
+  lr_reduce_patience:
+    values: [2, 3, 4, 5]
+  epochs:
+    value: 1
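This sweep config is consumed by `tune.py` through Weights & Biases. A sketch of how it might be registered and run: `read_yaml` is imported in tune.py but its definition (newsclassifier/utils.py) is not shown in this diff, so its exact signature is an assumption; it is assumed here to return the parsed YAML as a dict.

```python
# Sketch: register the sweep above with W&B and run Cfg.sweep_run trials.
# Assumption: read_yaml(path) returns the parsed YAML config as a dict.
import wandb

from newsclassifier.config.config import Cfg
from newsclassifier.tune import tune_loop
from newsclassifier.utils import read_yaml

sweep_config = read_yaml(Cfg.sweep_config_path)                    # assumed dict
sweep_id = wandb.sweep(sweep=sweep_config, project="NewsClassifier")
wandb.agent(sweep_id, function=tune_loop, count=Cfg.sweep_run)     # 10 random trials
```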
newsclassifier/data.py
ADDED
@@ -0,0 +1,197 @@
+import os
+import re
+from typing import Dict, Tuple
+from warnings import filterwarnings
+
+import pandas as pd
+from sklearn.model_selection import train_test_split
+
+import torch
+from newsclassifier.config.config import Cfg, logger
+from torch.utils.data import Dataset
+from transformers import RobertaTokenizer
+
+filterwarnings("ignore")
+
+
+def load_dataset(filepath: str, print_i: int = 0) -> pd.DataFrame:
+    """Load data from source into a Pandas DataFrame.
+
+    Args:
+        filepath (str): file location.
+        print_i (int): Print this many instances from the head of the data.
+
+    Returns:
+        pd.DataFrame: Pandas DataFrame of the data.
+    """
+    logger.info("Loading Data.")
+    df = pd.read_csv(filepath)
+    if print_i:
+        print(df.head(print_i), "\n")
+    return df
+
+
+def prepare_data(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
+    """Separate headline instances and perform feature selection.
+
+    Args:
+        df: original dataframe.
+
+    Returns:
+        df: new dataframe with appropriate features.
+        headlines_df: dataframe containing "Headlines" category instances.
+    """
+    logger.info("Preparing Data.")
+    try:
+        df = df[["Title", "Category"]]
+        df.rename(columns={"Title": "Text"}, inplace=True)
+        df, headlines_df = df[df["Category"] != "Headlines"].reset_index(drop=True), df[df["Category"] == "Headlines"].reset_index(drop=True)
+    except Exception as e:
+        logger.error(e)
+
+    return df, headlines_df
+
+
+def clean_text(text: str) -> str:
+    """Clean text (lowercasing, stopword removal, punctuation and extra-whitespace removal)."""
+    # lower case the text
+    logger.info("Cleaning input text.")
+    text = text.lower()  # must happen before stopword removal, as the stopwords are lower case
+
+    # remove stopwords
+    stp_pattern = re.compile(r"\b(" + r"|".join(Cfg.STOPWORDS) + r")\b\s*")
+    text = stp_pattern.sub("", text)
+
+    # custom cleaning
+    text = text.strip()  # remove space at start or end if any
+    text = re.sub(" +", " ", text)  # remove extra spaces
+    text = re.sub("[^A-Za-z0-9]+", " ", text)  # remove characters that are not alphanumeric
+
+    return text
+
+
+def preprocess(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, Dict, Dict]:
+    """Preprocess the data.
+
+    Args:
+        df: Dataframe on which the preprocessing steps need to be performed.
+
+    Returns:
+        df: Preprocessed Data.
+        headlines_df: dataframe of "Headlines" instances.
+        class_to_index: class labels to indices mapping.
+        index_to_class: indices to class labels mapping.
+    """
+    df, headlines_df = prepare_data(df)
+
+    cats = df["Category"].unique().tolist()
+    class_to_index = {tag: i for i, tag in enumerate(cats)}
+    index_to_class = {v: k for k, v in class_to_index.items()}
+
+    df["Text"] = df["Text"].apply(clean_text)  # clean text
+    df = df[["Text", "Category"]]
+    try:
+        df["Category"] = df["Category"].map(class_to_index)  # label encoding
+    except Exception as e:
+        logger.error(e)
+    return df, headlines_df, class_to_index, index_to_class
+
+
+def data_split(df: pd.DataFrame, split_size: float = 0.2, stratify_on_target: bool = True, save_dfs: bool = False):
+    """Split data into train and test sets.
+
+    Args:
+        df (pd.DataFrame): Data to be split.
+        split_size (float): train-test split ratio (test ratio).
+        stratify_on_target (bool): Whether to do a stratified split on the target.
+        save_dfs (bool): Whether to save dataset splits in artifacts.
+
+    Returns:
+        train-test splits.
+    """
+    logger.info("Splitting Data.")
+    try:
+        if stratify_on_target:
+            stra = df["Category"]
+        else:
+            stra = None
+
+        train, test = train_test_split(df, test_size=split_size, random_state=42, stratify=stra)
+        train_ds = pd.DataFrame(train, columns=df.columns)
+        test_ds = pd.DataFrame(test, columns=df.columns)
+
+        if save_dfs:
+            logger.info("Saving and storing data splits.")
+
+            os.makedirs(Cfg.preprocessed_data_path, exist_ok=True)
+            train.to_csv(os.path.join(Cfg.preprocessed_data_path, "train.csv"))
+            test.to_csv(os.path.join(Cfg.preprocessed_data_path, "test.csv"))
+    except Exception as e:
+        logger.error(e)
+
+    return train_ds, test_ds
+
+
+def prepare_input(tokenizer: RobertaTokenizer, text: str) -> Dict:
+    """Tokenize and prepare the input text using the provided tokenizer.
+
+    Args:
+        tokenizer (RobertaTokenizer): The Roberta tokenizer to encode the input.
+        text (str): The input text to be tokenized.
+
+    Returns:
+        inputs (dict): A dictionary containing the tokenized input with keys such as 'input_ids',
+        'attention_mask', etc.
+    """
+    logger.info("Tokenizing input text.")
+    inputs = tokenizer.encode_plus(
+        text,
+        return_tensors=None,
+        add_special_tokens=Cfg.add_special_tokens,
+        max_length=Cfg.max_len,
+        pad_to_max_length=Cfg.pad_to_max_length,
+        truncation=Cfg.truncation,
+    )
+    for k, v in inputs.items():
+        inputs[k] = torch.tensor(v, dtype=torch.long)
+    return inputs
+
+
+class NewsDataset(Dataset):
+    def __init__(self, ds):
+        self.texts = ds["Text"].values
+        self.labels = ds["Category"].values
+
+    def __len__(self):
+        return len(self.texts)
+
+    def __getitem__(self, item):
+        tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
+        inputs = prepare_input(tokenizer, self.texts[item])
+        labels = torch.tensor(self.labels[item], dtype=torch.float)
+        return inputs, labels
+
+
+def collate(inputs: Dict) -> Dict:
+    """Collate and modify the input dictionary to have the same sequence length for a particular input batch.
+
+    Args:
+        inputs (dict): A dictionary containing input tensors with varying sequence lengths.
+
+    Returns:
+        modified_inputs (dict): A modified dictionary with input tensors trimmed to have the same sequence length.
+    """
+    max_len = int(inputs["attention_mask"].sum(axis=1).max())  # longest real (non-pad) sequence in the batch
+    for k, v in inputs.items():
+        inputs[k] = inputs[k][:, :max_len]
+    return inputs
+
+
+if __name__ == "__main__":
+    df = load_dataset(Cfg.dataset_loc)
+    df, headlines_df, class_to_index, index_to_class = preprocess(df)
+    print(df)
+    print(class_to_index)
+    train_ds, val_ds = data_split(df, save_dfs=True)
+    dataset = NewsDataset(df)
+    print(dataset.__getitem__(0))
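To see how `prepare_input` and `collate` fit together: every text is first padded to `Cfg.max_len` (50) tokens, and `collate` then trims a stacked batch back down to its longest real sequence so the model never processes all-pad columns. A minimal sketch (the two headlines are made-up inputs):

```python
# Sketch: tokenize two texts, stack them into a batch, trim with collate.
import torch
from transformers import RobertaTokenizer

from newsclassifier.data import collate, prepare_input

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
headlines = ["nvidia earnings beat estimates", "messi scores again"]  # made-up examples

encoded = [prepare_input(tokenizer, text) for text in headlines]
batch = {k: torch.stack([e[k] for e in encoded]) for k in encoded[0]}  # (2, 50) tensors
batch = collate(batch)
print(batch["input_ids"].shape)  # (2, longest_sequence_in_batch)
```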
newsclassifier/inference.py
ADDED
@@ -0,0 +1,54 @@
+import os
+from typing import Tuple
+
+import numpy as np
+import pandas as pd
+from sklearn.metrics import (accuracy_score, f1_score, precision_score,
+                             recall_score)
+from tqdm.auto import tqdm
+
+import torch
+from newsclassifier.config.config import Cfg, logger
+from newsclassifier.data import NewsDataset, collate
+from newsclassifier.models import CustomModel
+from torch.utils.data import DataLoader
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+def test_step(test_loader: DataLoader, model) -> Tuple[np.ndarray, np.ndarray]:
+    """Eval step."""
+    model.eval()
+    y_trues, y_preds = [], []
+    with torch.inference_mode():
+        for step, (inputs, labels) in tqdm(enumerate(test_loader)):
+            inputs = collate(inputs)
+            for k, v in inputs.items():
+                inputs[k] = v.to(device)
+            labels = labels.to(device)
+            y_pred = model(inputs)
+            y_trues.extend(labels.cpu().numpy())
+            y_preds.extend(torch.argmax(y_pred, dim=1).cpu().numpy())
+    return np.vstack(y_trues), np.vstack(y_preds)
+
+
+def inference():
+    logger.info("Loading inference data.")
+    try:
+        # NewsDataset expects a DataFrame, not a file path
+        test_dataset = NewsDataset(pd.read_csv(os.path.join(Cfg.preprocessed_data_path, "test.csv")))
+        test_loader = DataLoader(test_dataset, batch_size=Cfg.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)
+    except Exception as e:
+        logger.error(e)
+
+    logger.info("Loading model.")
+    try:
+        model = CustomModel(num_classes=Cfg.num_classes)
+        model.load_state_dict(torch.load(Cfg.model_path, map_location=torch.device("cpu")))
+        model.to(device)
+    except Exception as e:
+        logger.error(e)
+
+    y_true, y_pred = test_step(test_loader, model)
+
+    print(
+        f'Precision: {precision_score(y_true, y_pred, average="weighted")} \n Recall: {recall_score(y_true, y_pred, average="weighted")} \n F1: {f1_score(y_true, y_pred, average="weighted")} \n Accuracy: {accuracy_score(y_true, y_pred)}'
+    )
newsclassifier/models.py
ADDED
@@ -0,0 +1,60 @@
+import json
+import os
+from pathlib import Path
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from transformers import RobertaModel
+
+
+class CustomModel(nn.Module):
+    def __init__(self, num_classes, change_config=False, dropout_pb=0.0):
+        super(CustomModel, self).__init__()
+        if change_config:
+            pass
+        self.model = RobertaModel.from_pretrained("roberta-base")
+        self.hidden_size = self.model.config.hidden_size
+        self.num_classes = num_classes
+        self.dropout_pb = dropout_pb
+        self.dropout = torch.nn.Dropout(self.dropout_pb)
+        self.fc = nn.Linear(self.hidden_size, self.num_classes)
+
+    def forward(self, inputs):
+        output = self.model(**inputs)
+        z = self.dropout(output[1])  # output[1] is the pooled [CLS] representation
+        z = self.fc(z)
+        return z
+
+    @torch.inference_mode()
+    def predict(self, inputs):
+        self.eval()
+        z = self(inputs)
+        y_pred = torch.argmax(z, dim=1).cpu().numpy()
+        return y_pred
+
+    @torch.inference_mode()
+    def predict_proba(self, inputs):
+        self.eval()
+        z = self(inputs)
+        y_probs = F.softmax(z, dim=1).cpu().numpy()
+        return y_probs
+
+    def save(self, dp):
+        with open(Path(dp, "args.json"), "w") as fp:
+            contents = {
+                "dropout_pb": self.dropout_pb,
+                "hidden_size": self.hidden_size,
+                "num_classes": self.num_classes,
+            }
+            json.dump(contents, fp, indent=4, sort_keys=False)
+        torch.save(self.state_dict(), os.path.join(dp, "model.pt"))
+
+    @classmethod
+    def load(cls, args_fp, state_dict_fp):
+        with open(args_fp, "r") as fp:
+            kwargs = json.load(fp=fp)
+        # __init__ builds the RoBERTa backbone itself and takes only these args
+        model = cls(num_classes=kwargs["num_classes"], dropout_pb=kwargs["dropout_pb"])
+        model.load_state_dict(torch.load(state_dict_fp, map_location=torch.device("cpu")))
+        return model
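A sketch of the save/load round trip: `save()` writes `args.json` plus `model.pt` into a directory, and `load()` rebuilds the module from those two files (train.py creates the artifacts directory before saving, so the same is done here):

```python
# Sketch: persist a model and restore it from the saved artifacts.
import os

from newsclassifier.config.config import Cfg
from newsclassifier.models import CustomModel

model = CustomModel(num_classes=Cfg.num_classes, dropout_pb=Cfg.dropout_pb)
os.makedirs(Cfg.artifacts_path, exist_ok=True)
model.save(Cfg.artifacts_path)  # -> artifacts/args.json, artifacts/model.pt

restored = CustomModel.load(
    args_fp=os.path.join(Cfg.artifacts_path, "args.json"),
    state_dict_fp=Cfg.model_path,
)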
newsclassifier/predict.py
ADDED
@@ -0,0 +1,32 @@
+import os
+
+import torch
+from newsclassifier.config.config import Cfg, logger
+from newsclassifier.data import clean_text, prepare_input
+from newsclassifier.models import CustomModel
+from transformers import RobertaTokenizer
+
+
+def predict(text: str):
+    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
+    model = CustomModel(num_classes=7)
+    model.load_state_dict(torch.load(os.path.join(Cfg.artifacts_path, "model.pt"), map_location=torch.device("cpu")))
+    index_to_class = Cfg.index_to_class
+    sample_input = prepare_input(tokenizer, text)
+    input_ids = torch.unsqueeze(sample_input["input_ids"], 0).to("cpu")
+    attention_masks = torch.unsqueeze(sample_input["attention_mask"], 0).to("cpu")
+    test_sample = dict(input_ids=input_ids, attention_mask=attention_masks)
+
+    with torch.no_grad():
+        y_pred_test_sample = model.predict_proba(test_sample)
+        prediction = y_pred_test_sample[0]
+
+    return prediction
+
+
+if __name__ == "__main__":
+    txt = clean_text("Funds punished for owning too few Nvidia")
+    pred_prob = predict(txt)
+    print(pred_prob)
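Note that `predict()` returns the raw probability vector rather than a label; `Cfg.index_to_class` turns the argmax into a human-readable category. A short sketch:

```python
# Sketch: map predict()'s probability vector to a category label.
import numpy as np

from newsclassifier.config.config import Cfg
from newsclassifier.data import clean_text
from newsclassifier.predict import predict

pred_prob = predict(clean_text("Funds punished for owning too few Nvidia"))
label = Cfg.index_to_class[int(np.argmax(pred_prob))]
print(label, float(pred_prob.max()))  # e.g. category name plus its confidence
```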
newsclassifier/train.py
ADDED
@@ -0,0 +1,151 @@
+import gc
+import os
+import time
+from typing import Tuple
+
+import numpy as np
+from tqdm.auto import tqdm
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import wandb
+from newsclassifier.config.config import Cfg, logger
+from newsclassifier.data import (NewsDataset, collate, data_split,
+                                 load_dataset, preprocess)
+from newsclassifier.models import CustomModel
+from torch.utils.data import DataLoader
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+def train_step(train_loader: DataLoader, model, num_classes: int, loss_fn, optimizer, epoch: int) -> float:
+    """Train step."""
+    model.train()
+    loss = 0.0
+    total_iterations = len(train_loader)
+    desc = f"Training - Epoch {epoch+1}"
+    for step, (inputs, labels) in tqdm(enumerate(train_loader), total=total_iterations, desc=desc):
+        inputs = collate(inputs)
+        for k, v in inputs.items():
+            inputs[k] = v.to(device)
+        labels = labels.to(device)
+        optimizer.zero_grad()  # reset gradients
+        y_pred = model(inputs)  # forward pass
+        targets = F.one_hot(labels.long(), num_classes=num_classes).float()  # one-hot (for loss_fn)
+        J = loss_fn(y_pred, targets)  # define loss
+        J.backward()  # backward pass
+        optimizer.step()  # update weights
+        loss += (J.detach().item() - loss) / (step + 1)  # cumulative loss
+    return loss
+
+
+def eval_step(val_loader: DataLoader, model, num_classes: int, loss_fn, epoch: int) -> Tuple[float, np.ndarray, np.ndarray]:
+    """Eval step."""
+    model.eval()
+    loss = 0.0
+    total_iterations = len(val_loader)
+    desc = f"Validation - Epoch {epoch+1}"
+    y_trues, y_preds = [], []
+    with torch.inference_mode():
+        for step, (inputs, labels) in tqdm(enumerate(val_loader), total=total_iterations, desc=desc):
+            inputs = collate(inputs)
+            for k, v in inputs.items():
+                inputs[k] = v.to(device)
+            labels = labels.to(device)
+            y_pred = model(inputs)
+            targets = F.one_hot(labels.long(), num_classes=num_classes).float()  # one-hot (for loss_fn)
+            J = loss_fn(y_pred, targets).item()
+            loss += (J - loss) / (step + 1)
+            y_trues.extend(targets.cpu().numpy())
+            y_preds.extend(torch.argmax(y_pred, dim=1).cpu().numpy())
+    return loss, np.vstack(y_trues), np.vstack(y_preds)
+
+
+def train_loop(config=None):
+    # ====================================================
+    # loader
+    # ====================================================
+
+    config = dict(
+        batch_size=Cfg.batch_size,
+        num_classes=Cfg.num_classes,
+        epochs=Cfg.epochs,
+        dropout_pb=Cfg.dropout_pb,
+        learning_rate=Cfg.lr,
+        lr_reduce_factor=Cfg.lr_redfactor,
+        lr_reduce_patience=Cfg.lr_redpatience,
+    )
+
+    with wandb.init(project="NewsClassifier", config=config):
+        config = wandb.config
+
+        df = load_dataset(Cfg.dataset_loc)
+        ds, headlines_df, class_to_index, index_to_class = preprocess(df)
+        train_ds, val_ds = data_split(ds, split_size=Cfg.test_size)  # data_split takes split_size, not test_size
+
+        logger.info("Preparing Data.")
+
+        train_dataset = NewsDataset(train_ds)
+        valid_dataset = NewsDataset(val_ds)
+
+        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
+        valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)
+
+        # ====================================================
+        # model
+        # ====================================================
+
+        logger.info("Creating Custom Model.")
+        num_classes = config.num_classes
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        model = CustomModel(num_classes=num_classes, dropout_pb=config.dropout_pb)
+        model.to(device)
+
+        # ====================================================
+        # Training components
+        # ====================================================
+        criterion = nn.BCEWithLogitsLoss()
+        optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
+        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
+            optimizer, mode="min", factor=config.lr_reduce_factor, patience=config.lr_reduce_patience
+        )
+
+        # ====================================================
+        # loop
+        # ====================================================
+        wandb.watch(model, criterion, log="all", log_freq=10)
+
+        min_loss = np.inf
+        logger.info("Starting Training Loop.")
+        for epoch in range(config.epochs):
+            try:
+                start_time = time.time()
+
+                # Step
+                train_loss = train_step(train_loader, model, num_classes, criterion, optimizer, epoch)
+                val_loss, _, _ = eval_step(valid_loader, model, num_classes, criterion, epoch)
+                scheduler.step(val_loss)
+
+                # scoring
+                elapsed = time.time() - start_time
+                wandb.log({"epoch": epoch + 1, "train_loss": train_loss, "val_loss": val_loss})
+                print(f"Epoch {epoch+1} - avg_train_loss: {train_loss:.4f}  avg_val_loss: {val_loss:.4f}  time: {elapsed:.0f}s")
+
+                if min_loss > val_loss:
+                    min_loss = val_loss
+                    print("Best Score : saving model.")
+                    os.makedirs(Cfg.artifacts_path, exist_ok=True)
+                    model.save(Cfg.artifacts_path)
+                    print(f"\nSaved Best Model Score: {min_loss:.4f}\n\n")
+            except Exception as e:
+                logger.error(f"Epoch - {epoch+1}, {e}")
+
+        wandb.save(os.path.join(Cfg.artifacts_path, "model.pt"))
+        torch.cuda.empty_cache()
+        gc.collect()
+
+
+if __name__ == "__main__":
+    train_loop()
newsclassifier/tune.py
ADDED
@@ -0,0 +1,85 @@
import gc
import time
from typing import Tuple

import numpy as np

import torch
import torch.nn as nn
import wandb
from newsclassifier.config.config import Cfg, logger
from newsclassifier.data import (NewsDataset, data_split, load_dataset,
                                 preprocess)
from newsclassifier.models import CustomModel
from newsclassifier.train import eval_step, train_step
from newsclassifier.utils import read_yaml
from torch.utils.data import DataLoader

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def tune_loop(config=None):
    # ====================================================
    # loader
    # ====================================================
    logger.info("Starting Tuning.")
    with wandb.init(project="NewsClassifier", config=config):
        config = wandb.config

        df = load_dataset(Cfg.dataset_loc)
        ds, headlines_df, class_to_index, index_to_class = preprocess(df)
        train_ds, val_ds = data_split(ds, test_size=Cfg.test_size)

        train_dataset = NewsDataset(train_ds)
        valid_dataset = NewsDataset(val_ds)

        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
        valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)

        # ====================================================
        # model
        # ====================================================
        num_classes = Cfg.num_classes
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = CustomModel(num_classes=num_classes, dropout_pb=config.dropout_pb)
        model.to(device)

        # ====================================================
        # Training components
        # ====================================================
        criterion = nn.BCEWithLogitsLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode="min", factor=config.lr_reduce_factor, patience=config.lr_reduce_patience
        )

        # ====================================================
        # loop
        # ====================================================
        wandb.watch(model, criterion, log="all", log_freq=10)

        for epoch in range(config.epochs):
            try:
                start_time = time.time()

                # Step
                train_loss = train_step(train_loader, model, num_classes, criterion, optimizer, epoch)
                val_loss, _, _ = eval_step(valid_loader, model, num_classes, criterion, epoch)
                scheduler.step(val_loss)

                # scoring
                elapsed = time.time() - start_time
                wandb.log({"epoch": epoch + 1, "train_loss": train_loss, "val_loss": val_loss})
                print(f"Epoch {epoch+1} - avg_train_loss: {train_loss:.4f} avg_val_loss: {val_loss:.4f} time: {elapsed:.0f}s")
            except Exception as e:
                logger.error(f"Epoch {epoch+1}, {e}")

        torch.cuda.empty_cache()
        gc.collect()


if __name__ == "__main__":
    sweep_config = read_yaml(Cfg.sweep_config_path)
    sweep_id = wandb.sweep(sweep_config, project="NewsClassifier")
    wandb.agent(sweep_id, tune_loop, count=Cfg.sweep_runs)
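The `__main__` block loads the sweep definition from `config/sweep_config.yaml`, which is not shown in this section. For reference, a hypothetical sweep definition using the hyperparameter names `tune_loop` reads from `wandb.config` — the search method and value ranges below are assumptions, not the repo's actual settings:

# Hypothetical stand-in for sweep_config.yaml, expressed as a Python dict.
import wandb

sweep_config = {
    "method": "random",                                  # assumed; could be "grid" or "bayes"
    "metric": {"name": "val_loss", "goal": "minimize"},
    "parameters": {
        "batch_size": {"values": [64, 128]},
        "epochs": {"value": 10},
        "dropout_pb": {"values": [0.3, 0.5]},
        "learning_rate": {"min": 1e-5, "max": 1e-3},     # sampled uniformly by default
        "lr_reduce_factor": {"value": 0.7},
        "lr_reduce_patience": {"value": 4},
    },
}

sweep_id = wandb.sweep(sweep_config, project="NewsClassifier")
# wandb.agent(sweep_id, tune_loop, count=5)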
newsclassifier/utils.py
ADDED
@@ -0,0 +1,20 @@
import os

import pandas as pd
import yaml

from newsclassifier.config.config import Cfg, logger


def write_yaml(data: pd.DataFrame, filepath: str):
    logger.info("Writing yaml file.")
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    with open(filepath, "w") as file:
        yaml.dump(data, file, default_flow_style=False)


def read_yaml(file_path: str):
    logger.info("Reading yaml file.")
    with open(file_path, "r") as file:
        params = yaml.load(file, Loader=yaml.FullLoader)
    return params
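A usage sketch for the two helpers above (the file path is illustrative). Note that `yaml.load` with `Loader=yaml.FullLoader` avoids arbitrary Python object construction; `yaml.safe_load` would be stricter still:

from newsclassifier.utils import read_yaml, write_yaml

params = {"learning_rate": 1e-4, "batch_size": 128}
write_yaml(params, "artifacts/run_params.yaml")   # creates the parent directory if needed
restored = read_yaml("artifacts/run_params.yaml")
assert restored == params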
notebooks/eda.ipynb
ADDED
@@ -0,0 +1,257 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import ipywidgets as widgets\n",
    "from wordcloud import WordCloud, STOPWORDS"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Title</th>\n",
       "      <th>Publisher</th>\n",
       "      <th>DateTime</th>\n",
       "      <th>Link</th>\n",
       "      <th>Category</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Chainlink (LINK) Falters, Hedera (HBAR) Wobble...</td>\n",
       "      <td>Analytics Insight</td>\n",
       "      <td>2023-08-30T06:54:49Z</td>\n",
       "      <td>https://news.google.com/articles/CBMibGh0dHBzO...</td>\n",
       "      <td>Business</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>Funds punished for owning too few Nvidia share...</td>\n",
       "      <td>ZAWYA</td>\n",
       "      <td>2023-08-30T07:15:59Z</td>\n",
       "      <td>https://news.google.com/articles/CBMigwFodHRwc...</td>\n",
       "      <td>Business</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>Crude oil prices stalled as hedge funds sold: ...</td>\n",
       "      <td>ZAWYA</td>\n",
       "      <td>2023-08-30T07:31:31Z</td>\n",
       "      <td>https://news.google.com/articles/CBMibGh0dHBzO...</td>\n",
       "      <td>Business</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Grayscale's Bitcoin Win Is Still Only Half the...</td>\n",
       "      <td>Bloomberg</td>\n",
       "      <td>2023-08-30T10:38:40Z</td>\n",
       "      <td>https://news.google.com/articles/CBMib2h0dHBzO...</td>\n",
       "      <td>Business</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>I'm a Home Shopping Editor, and These Are the ...</td>\n",
       "      <td>Better Homes & Gardens</td>\n",
       "      <td>2023-08-30T11:00:00Z</td>\n",
       "      <td>https://news.google.com/articles/CBMiPWh0dHBzO...</td>\n",
       "      <td>Business</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                               Title               Publisher  \\\n",
       "0  Chainlink (LINK) Falters, Hedera (HBAR) Wobble...       Analytics Insight   \n",
       "1  Funds punished for owning too few Nvidia share...                   ZAWYA   \n",
       "2  Crude oil prices stalled as hedge funds sold: ...                   ZAWYA   \n",
       "3  Grayscale's Bitcoin Win Is Still Only Half the...               Bloomberg   \n",
       "4  I'm a Home Shopping Editor, and These Are the ...  Better Homes & Gardens   \n",
       "\n",
       "               DateTime                                               Link  \\\n",
       "0  2023-08-30T06:54:49Z  https://news.google.com/articles/CBMibGh0dHBzO...   \n",
       "1  2023-08-30T07:15:59Z  https://news.google.com/articles/CBMigwFodHRwc...   \n",
       "2  2023-08-30T07:31:31Z  https://news.google.com/articles/CBMibGh0dHBzO...   \n",
       "3  2023-08-30T10:38:40Z  https://news.google.com/articles/CBMib2h0dHBzO...   \n",
       "4  2023-08-30T11:00:00Z  https://news.google.com/articles/CBMiPWh0dHBzO...   \n",
       "\n",
       "   Category  \n",
       "0  Business  \n",
       "1  Business  \n",
       "2  Business  \n",
       "3  Business  \n",
       "4  Business  "
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Data Ingestion\n",
    "df = pd.read_csv(\"../dataset/raw/news_dataset.csv\")\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Text(0.5, 1.0, 'Category Distribution')"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "<base64 PNG omitted: bar chart of headline counts per category; title 'Category Distribution', y-axis 'Number of News'>",
      "text/plain": [
       "<Figure size 1000x500 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Distribution Bar plot (Count plot)\n",
    "plt.figure(figsize=(10, 5))\n",
    "sns.barplot(x=df[\"Category\"].value_counts().index, y=df[\"Category\"].value_counts())\n",
    "plt.ylabel(\"Number of News\")\n",
    "plt.title(\"Category Distribution\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**There's no extreme class imbalance, although \"Health\" and \"Science\" have roughly half as many headlines as \"Sports\" (the majority class).**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8368a3df9eea413b99d2d0c5876fbcf6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(Dropdown(description='category', options=('Business', 'Entertainment', 'Headlines', 'Hea…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Word cloud\n",
    "categories = df[\"Category\"].unique().tolist()\n",
    "\n",
    "\n",
    "@widgets.interact(category=categories)\n",
    "def display_categorical_plots(category=categories[0]):\n",
    "    subset = df[df[\"Category\"] == category].sample(n=100, random_state=42)\n",
    "    text = subset[\"Title\"].values\n",
    "    cloud = WordCloud(stopwords=STOPWORDS, background_color=\"black\", collocations=False, width=600, height=400).generate(\" \".join(text))\n",
    "    plt.axis(\"off\")\n",
    "    plt.imshow(cloud)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**From the word cloud we can immediately draw one insight: redundant keywords like \"New\" appear frequently across different categories.**<br>\n",
    "**We can also see action verbs, adjectives, and adverbs that should be removed to some extent before training the model.**<br>\n",
    "Other than that, the word clouds are quite intuitive given their respective category tags/names.<br><br>\n",
    "We can also see that the \"Headlines\" category contains mixed words (expected, since breaking news can come from any category), so we'll hold out those instances as a target-free test set just to analyze how many headlines fall under each category."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "news_venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
|
notebooks/newsclassifier-roberta-base-wandb-track-sweep.ipynb
ADDED
@@ -0,0 +1,1035 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# NewsClassifier"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "id": "mtVYEQSYsswc",
+    "outputId": "6f16c0c1-ef25-406c-dd14-edd1a72dc760",
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "[nltk_data] Downloading package stopwords to\n",
+      "[nltk_data]     C:\\Users\\manis\\AppData\\Roaming\\nltk_data...\n",
+      "[nltk_data]   Package stopwords is already up-to-date!\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Imports\n",
+    "import os\n",
+    "import gc\n",
+    "import time\n",
+    "from pathlib import Path\n",
+    "import json\n",
+    "from typing import Tuple, Dict\n",
+    "from warnings import filterwarnings\n",
+    "\n",
+    "filterwarnings(\"ignore\")\n",
+    "\n",
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import seaborn as sns\n",
+    "import ipywidgets as widgets\n",
+    "from wordcloud import WordCloud, STOPWORDS\n",
+    "\n",
+    "from tqdm.auto import tqdm\n",
+    "from dataclasses import dataclass\n",
+    "\n",
+    "import re\n",
+    "import nltk\n",
+    "from nltk.corpus import stopwords\n",
+    "\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "from torch.utils.data import DataLoader, Dataset\n",
+    "\n",
+    "from transformers import RobertaTokenizer, RobertaModel\n",
+    "\n",
+    "import wandb\n",
+    "\n",
+    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "nltk.download(\"stopwords\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mmanishdrw1\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "wandb.login()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "id": "fGW_WYn31JHT",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "@dataclass\n",
+    "class Cfg:\n",
+    "    STOPWORDS = stopwords.words(\"english\")\n",
+    "    dataset_loc = \"../dataset/raw/news_dataset.csv\"\n",
+    "    test_size = 0.2\n",
+    "\n",
+    "    add_special_tokens = True\n",
+    "    max_len = 50\n",
+    "    pad_to_max_length = True\n",
+    "    truncation = True\n",
+    "\n",
+    "    change_config = False\n",
+    "\n",
+    "    dropout_pb = 0.5\n",
+    "    lr = 1e-4\n",
+    "    lr_redfactor = 0.7\n",
+    "    lr_redpatience = 4\n",
+    "    epochs = 10\n",
+    "    batch_size = 128\n",
+    "\n",
+    "    wandb_sweep = False"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "id": "7V5OJWw4sswg",
+    "outputId": "8eb13263-d31a-4d49-f1f6-3c2dc0595c78",
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Matthew McConaughey Gives Joy Behar A Foot Massage On ‘The View’\n",
+      "Entertainment\n"
+     ]
+    }
+   ],
+   "source": [
+    "df = pd.read_csv(Cfg.dataset_loc)\n",
+    "print(df[\"Title\"][10040])\n",
+    "print(df[\"Category\"][10040])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "w05pkO5RN1H2"
+   },
+   "source": [
+    "## Prepare Data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {
+    "id": "l8Z3Hhk3sswg",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "def prepare_data(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:\n",
+    "    \"\"\"Separate out the \"Headlines\" instances and select the relevant features.\n",
+    "\n",
+    "    Args:\n",
+    "        df: original dataframe.\n",
+    "\n",
+    "    Returns:\n",
+    "        df: new dataframe with the appropriate features.\n",
+    "        headlines_df: dataframe containing \"Headlines\" category instances.\n",
+    "    \"\"\"\n",
+    "    df = df[[\"Title\", \"Category\"]]\n",
+    "    df.rename(columns={\"Title\": \"Text\"}, inplace=True)\n",
+    "    df, headlines_df = df[df[\"Category\"] != \"Headlines\"].reset_index(drop=True), df[df[\"Category\"] == \"Headlines\"].reset_index(drop=True)\n",
+    "\n",
+    "    return df, headlines_df"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {
+    "id": "d4t7JjIEsswg",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "def clean_text(text: str) -> str:\n",
+    "    \"\"\"Clean text (lowercasing, punctuation removal, extra blank space removal).\"\"\"\n",
+    "    # lower case the text\n",
+    "    text = text.lower()  # necessary to do first, as the stopwords are in lower case\n",
+    "\n",
+    "    # remove stopwords\n",
+    "    stp_pattern = re.compile(r\"\\b(\" + r\"|\".join(Cfg.STOPWORDS) + r\")\\b\\s*\")\n",
+    "    text = stp_pattern.sub(\"\", text)\n",
+    "\n",
+    "    # custom cleaning\n",
+    "    text = text.strip()  # remove space at start or end if any\n",
+    "    text = re.sub(\" +\", \" \", text)  # remove extra spaces\n",
+    "    text = re.sub(\"[^A-Za-z0-9]+\", \" \", text)  # replace non-alphanumeric characters with a space\n",
+    "\n",
+    "    return text"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {
+    "id": "NokmvVFusswh",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "def preprocess(df: pd.DataFrame) -> Tuple[pd.DataFrame, Dict, Dict]:\n",
+    "    \"\"\"Preprocess the data.\n",
+    "\n",
+    "    Args:\n",
+    "        df: Dataframe on which the preprocessing steps need to be performed.\n",
+    "\n",
+    "    Returns:\n",
+    "        df: Preprocessed data.\n",
+    "        class_to_index: class labels to indices mapping.\n",
+    "        index_to_class: indices to class labels mapping.\n",
+    "    \"\"\"\n",
+    "    df, headlines_df = prepare_data(df)\n",
+    "\n",
+    "    cats = df[\"Category\"].unique().tolist()\n",
+    "    num_classes = len(cats)\n",
+    "    class_to_index = {tag: i for i, tag in enumerate(cats)}\n",
+    "    index_to_class = {v: k for k, v in class_to_index.items()}\n",
+    "\n",
+    "    df[\"Text\"] = df[\"Text\"].apply(clean_text)  # clean text\n",
+    "    df = df[[\"Text\", \"Category\"]]\n",
+    "    df[\"Category\"] = df[\"Category\"].map(class_to_index)  # label encoding\n",
+    "    return df, class_to_index, index_to_class"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {
+    "id": "f45cNikCsswh",
+    "outputId": "880e338e-11a3-4048-ccf7-d30bf13e996b",
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<style scoped>\n",
+       "    .dataframe tbody tr th:only-of-type {\n",
+       "        vertical-align: middle;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "</style>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>Text</th>\n",
+       "      <th>Category</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>chainlink link falters hedera hbar wobbles yet...</td>\n",
+       "      <td>0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>funds punished owning nvidia shares stunning 2...</td>\n",
+       "      <td>0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td>crude oil prices stalled hedge funds sold kemp</td>\n",
+       "      <td>0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>3</th>\n",
+       "      <td>grayscale bitcoin win still half battle</td>\n",
+       "      <td>0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>4</th>\n",
+       "      <td>home shopping editor miss labor day deals eyeing</td>\n",
+       "      <td>0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>...</th>\n",
+       "      <td>...</td>\n",
+       "      <td>...</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>44142</th>\n",
+       "      <td>slovakia election could echo ukraine expect</td>\n",
+       "      <td>6</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>44143</th>\n",
+       "      <td>things know nobel prizes washington post</td>\n",
+       "      <td>6</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>44144</th>\n",
+       "      <td>brief calm protests killing 2 students rock im...</td>\n",
+       "      <td>6</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>44145</th>\n",
+       "      <td>one safe france vows action bedbugs sweep paris</td>\n",
+       "      <td>6</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>44146</th>\n",
+       "      <td>slovakia election polls open knife edge vote u...</td>\n",
+       "      <td>6</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "<p>44147 rows × 2 columns</p>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "                                                    Text  Category\n",
+       "0      chainlink link falters hedera hbar wobbles yet...         0\n",
+       "1      funds punished owning nvidia shares stunning 2...         0\n",
+       "2         crude oil prices stalled hedge funds sold kemp         0\n",
+       "3                grayscale bitcoin win still half battle         0\n",
+       "4       home shopping editor miss labor day deals eyeing         0\n",
+       "...                                                  ...       ...\n",
+       "44142        slovakia election could echo ukraine expect         6\n",
+       "44143           things know nobel prizes washington post         6\n",
+       "44144  brief calm protests killing 2 students rock im...         6\n",
+       "44145    one safe france vows action bedbugs sweep paris         6\n",
+       "44146  slovakia election polls open knife edge vote u...         6\n",
+       "\n",
+       "[44147 rows x 2 columns]"
+      ]
+     },
+     "execution_count": 17,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ds, class_to_index, index_to_class = preprocess(df)\n",
+    "ds"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "index_to_class"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {
+    "id": "zGlMz2UJsswi",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "# Data splits\n",
+    "train_ds, val_ds = train_test_split(ds, test_size=Cfg.test_size, stratify=ds[\"Category\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {
+    "id": "zTeAsruMsswi",
+    "outputId": "bffed91d-04c6-490e-d682-03537d3182dd",
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'input_ids': tensor([    0,   462, 25744,  7188,   155,    23,   462, 11485,   112,     2,\n",
+       "            1,     1,     1,     1,     1,     1,     1,     1,     1,     1,\n",
+       "            1,     1,     1,     1,     1,     1,     1,     1,     1,     1,\n",
+       "            1,     1,     1,     1,     1,     1,     1,     1,     1,     1,\n",
+       "            1,     1,     1,     1,     1,     1,     1,     1,     1,     1]), 'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
+       "        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
+       "        0, 0])}"
+      ]
+     },
+     "execution_count": 21,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "def prepare_input(tokenizer: RobertaTokenizer, text: str) -> Dict:\n",
+    "    \"\"\"Tokenize and prepare the input text using the provided tokenizer.\n",
+    "\n",
+    "    Args:\n",
+    "        tokenizer (RobertaTokenizer): The Roberta tokenizer to encode the input.\n",
+    "        text (str): The input text to be tokenized.\n",
+    "\n",
+    "    Returns:\n",
+    "        inputs (dict): A dictionary containing the tokenized input with keys such as 'input_ids',\n",
+    "            'attention_mask', etc.\n",
+    "    \"\"\"\n",
+    "    inputs = tokenizer.encode_plus(\n",
+    "        text,\n",
+    "        return_tensors=None,\n",
+    "        add_special_tokens=Cfg.add_special_tokens,\n",
+    "        max_length=Cfg.max_len,\n",
+    "        pad_to_max_length=Cfg.pad_to_max_length,\n",
+    "        truncation=Cfg.truncation,\n",
+    "    )\n",
+    "    for k, v in inputs.items():\n",
+    "        inputs[k] = torch.tensor(v, dtype=torch.long)\n",
+    "    return inputs\n",
+    "\n",
+    "\n",
+    "class NewsDataset(Dataset):\n",
+    "    def __init__(self, ds):\n",
+    "        self.texts = ds[\"Text\"].values\n",
+    "        self.labels = ds[\"Category\"].values\n",
+    "\n",
+    "    def __len__(self):\n",
+    "        return len(self.texts)\n",
+    "\n",
+    "    def __getitem__(self, item):\n",
+    "        inputs = prepare_input(tokenizer, self.texts[item])\n",
+    "        labels = torch.tensor(self.labels[item], dtype=torch.float)\n",
+    "        return inputs, labels\n",
+    "\n",
+    "\n",
+    "def collate(inputs: Dict) -> Dict:\n",
+    "    \"\"\"Collate and modify the input dictionary to have the same sequence length for a particular input batch.\n",
+    "\n",
+    "    Args:\n",
+    "        inputs (dict): A dictionary containing input tensors with varying sequence lengths.\n",
+    "\n",
+    "    Returns:\n",
+    "        modified_inputs (dict): A modified dictionary with input tensors trimmed to have the same sequence length.\n",
+    "    \"\"\"\n",
+    "    max_len = int(inputs[\"attention_mask\"].sum(axis=1).max())  # length of the longest non-padded sequence in the batch\n",
+    "    for k, v in inputs.items():\n",
+    "        inputs[k] = inputs[k][:, :max_len]\n",
+    "    return inputs\n",
+    "\n",
+    "\n",
+    "tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n",
+    "\n",
+    "sample_input = prepare_input(tokenizer, train_ds[\"Text\"].values[10])\n",
+    "sample_input"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "-qp-4d-aN503"
+   },
+   "source": [
+    "## Model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {
+    "id": "XIJ6ARJfsswj",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "class CustomModel(nn.Module):\n",
+    "    def __init__(self, num_classes, change_config=False, dropout_pb=0.0):\n",
+    "        super(CustomModel, self).__init__()\n",
+    "        if change_config:\n",
+    "            pass\n",
+    "        self.model = RobertaModel.from_pretrained(\"roberta-base\")\n",
+    "        self.hidden_size = self.model.config.hidden_size\n",
+    "        self.num_classes = num_classes\n",
+    "        self.dropout_pb = dropout_pb\n",
+    "        self.dropout = torch.nn.Dropout(self.dropout_pb)\n",
+    "        self.fc = nn.Linear(self.hidden_size, self.num_classes)\n",
+    "\n",
+    "    def forward(self, inputs):\n",
+    "        output = self.model(**inputs)\n",
+    "        z = self.dropout(output[1])\n",
+    "        z = self.fc(z)\n",
+    "        return z\n",
+    "\n",
+    "    @torch.inference_mode()\n",
+    "    def predict(self, inputs):\n",
+    "        self.eval()\n",
+    "        z = self(inputs)\n",
+    "        y_pred = torch.argmax(z, dim=1).cpu().numpy()\n",
+    "        return y_pred\n",
+    "\n",
+    "    @torch.inference_mode()\n",
+    "    def predict_proba(self, inputs):\n",
+    "        self.eval()\n",
+    "        z = self(inputs)\n",
+    "        y_probs = F.softmax(z, dim=1).cpu().numpy()\n",
+    "        return y_probs\n",
+    "\n",
+    "    def save(self, dp):\n",
+    "        with open(Path(dp, \"args.json\"), \"w\") as fp:\n",
+    "            contents = {\n",
+    "                \"dropout_pb\": self.dropout_pb,\n",
+    "                \"hidden_size\": self.hidden_size,\n",
+    "                \"num_classes\": self.num_classes,\n",
+    "            }\n",
+    "            json.dump(contents, fp, indent=4, sort_keys=False)\n",
+    "        torch.save(self.state_dict(), os.path.join(dp, \"model.pt\"))\n",
+    "\n",
+    "    @classmethod\n",
+    "    def load(cls, args_fp, state_dict_fp):\n",
+    "        with open(args_fp, \"r\") as fp:\n",
+    "            kwargs = json.load(fp=fp)\n",
+    "        kwargs.pop(\"hidden_size\", None)  # derived from the backbone config, not an __init__ argument\n",
+    "        model = cls(**kwargs)\n",
+    "        model.load_state_dict(torch.load(state_dict_fp, map_location=torch.device(\"cpu\")))\n",
+    "        return model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "YZEM0lIlsswj",
+    "outputId": "c05d70cf-e75d-4514-b730-3070484ceee3",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initialize model check\n",
+    "num_classes = len(ds[\"Category\"].unique())\n",
+    "model = CustomModel(num_classes=num_classes, dropout_pb=Cfg.dropout_pb)\n",
+    "print(model.named_parameters)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ztUd4m9CN8qM"
+   },
+   "source": [
+    "## Training"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "a3VPiwjqsswk",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "def train_step(train_loader: DataLoader, model, num_classes: int, loss_fn, optimizer, epoch: int) -> float:\n",
+    "    \"\"\"Train step.\"\"\"\n",
+    "    model.train()\n",
+    "    loss = 0.0\n",
+    "    total_iterations = len(train_loader)\n",
+    "    desc = f\"Training - Epoch {epoch+1}\"\n",
+    "    for step, (inputs, labels) in tqdm(enumerate(train_loader), total=total_iterations, desc=desc):\n",
+    "        inputs = collate(inputs)\n",
+    "        for k, v in inputs.items():\n",
+    "            inputs[k] = v.to(device)\n",
+    "        labels = labels.to(device)\n",
+    "        optimizer.zero_grad()  # reset gradients\n",
+    "        y_pred = model(inputs)  # forward pass\n",
+    "        targets = F.one_hot(labels.long(), num_classes=num_classes).float()  # one-hot (for loss_fn)\n",
+    "        J = loss_fn(y_pred, targets)  # define loss\n",
+    "        J.backward()  # backward pass\n",
+    "        optimizer.step()  # update weights\n",
+    "        loss += (J.detach().item() - loss) / (step + 1)  # cumulative loss\n",
+    "    return loss\n",
+    "\n",
+    "\n",
+    "def eval_step(val_loader: DataLoader, model, num_classes: int, loss_fn, epoch: int) -> Tuple[float, np.ndarray, np.ndarray]:\n",
+    "    \"\"\"Eval step.\"\"\"\n",
+    "    model.eval()\n",
+    "    loss = 0.0\n",
+    "    total_iterations = len(val_loader)\n",
+    "    desc = f\"Validation - Epoch {epoch+1}\"\n",
+    "    y_trues, y_preds = [], []\n",
+    "    with torch.inference_mode():\n",
+    "        for step, (inputs, labels) in tqdm(enumerate(val_loader), total=total_iterations, desc=desc):\n",
+    "            inputs = collate(inputs)\n",
+    "            for k, v in inputs.items():\n",
+    "                inputs[k] = v.to(device)\n",
+    "            labels = labels.to(device)\n",
+    "            y_pred = model(inputs)\n",
+    "            targets = F.one_hot(labels.long(), num_classes=num_classes).float()  # one-hot (for loss_fn)\n",
+    "            J = loss_fn(y_pred, targets).item()\n",
+    "            loss += (J - loss) / (step + 1)\n",
+    "            y_trues.extend(labels.cpu().numpy())  # store class indices, consistent with y_preds\n",
+    "            y_preds.extend(torch.argmax(y_pred, dim=1).cpu().numpy())\n",
+    "    return loss, np.vstack(y_trues), np.vstack(y_preds)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Sweep config"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "sweep_config = {\"method\": \"random\"}\n",
+    "\n",
+    "metric = {\"name\": \"val_loss\", \"goal\": \"minimize\"}\n",
+    "\n",
+    "sweep_config[\"metric\"] = metric\n",
+    "\n",
+    "parameters_dict = {\n",
+    "    \"dropout_pb\": {\n",
+    "        \"values\": [0.3, 0.4, 0.5],\n",
+    "    },\n",
+    "    \"learning_rate\": {\n",
+    "        \"values\": [0.0001, 0.001, 0.01],\n",
+    "    },\n",
+    "    \"batch_size\": {\n",
+    "        \"values\": [32, 64, 128],\n",
+    "    },\n",
+    "    \"lr_reduce_factor\": {\n",
+    "        \"values\": [0.5, 0.6, 0.7, 0.8],\n",
+    "    },\n",
+    "    \"lr_reduce_patience\": {\n",
+    "        \"values\": [2, 3, 4, 5],\n",
+    "    },\n",
+    "    \"epochs\": {\"value\": 1},\n",
+    "}\n",
+    "\n",
+    "sweep_config[\"parameters\"] = parameters_dict"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "# create sweep\n",
+    "if Cfg.wandb_sweep:\n",
+    "    sweep_id = wandb.sweep(sweep_config, project=\"NewsClassifier\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "oG-4tz-Lsswk",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "def train_loop(config=None):\n",
+    "    # ====================================================\n",
+    "    # loader\n",
+    "    # ====================================================\n",
+    "\n",
+    "    if not Cfg.wandb_sweep:\n",
+    "        config = dict(\n",
+    "            batch_size=Cfg.batch_size,\n",
+    "            num_classes=7,\n",
+    "            epochs=Cfg.epochs,\n",
+    "            dropout_pb=Cfg.dropout_pb,\n",
+    "            learning_rate=Cfg.lr,\n",
+    "            lr_reduce_factor=Cfg.lr_redfactor,\n",
+    "            lr_reduce_patience=Cfg.lr_redpatience,\n",
+    "        )\n",
+    "\n",
+    "    with wandb.init(project=\"NewsClassifier\", config=config):\n",
+    "        config = wandb.config\n",
+    "\n",
+    "        train_ds, val_ds = train_test_split(ds, test_size=Cfg.test_size, stratify=ds[\"Category\"])\n",
+    "\n",
+    "        train_dataset = NewsDataset(train_ds)\n",
+    "        valid_dataset = NewsDataset(val_ds)\n",
+    "\n",
+    "        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)\n",
+    "        valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)\n",
+    "\n",
+    "        # ====================================================\n",
+    "        # model\n",
+    "        # ====================================================\n",
+    "        num_classes = 7\n",
+    "        device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "        model = CustomModel(num_classes=num_classes, dropout_pb=config.dropout_pb)\n",
+    "        model.to(device)\n",
+    "\n",
+    "        # ====================================================\n",
+    "        # Training components\n",
+    "        # ====================================================\n",
+    "        criterion = nn.BCEWithLogitsLoss()\n",
+    "        optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)\n",
+    "        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
+    "            optimizer, mode=\"min\", factor=config.lr_reduce_factor, patience=config.lr_reduce_patience\n",
+    "        )\n",
+    "\n",
+    "        # ====================================================\n",
+    "        # loop\n",
+    "        # ====================================================\n",
+    "        wandb.watch(model, criterion, log=\"all\", log_freq=10)\n",
+    "\n",
+    "        min_loss = np.inf\n",
+    "\n",
+    "        for epoch in range(config.epochs):\n",
+    "            start_time = time.time()\n",
+    "\n",
+    "            # Step\n",
+    "            train_loss = train_step(train_loader, model, num_classes, criterion, optimizer, epoch)\n",
+    "            val_loss, _, _ = eval_step(valid_loader, model, num_classes, criterion, epoch)\n",
+    "            scheduler.step(val_loss)\n",
+    "\n",
+    "            # scoring\n",
+    "            elapsed = time.time() - start_time\n",
+    "            wandb.log({\"epoch\": epoch + 1, \"train_loss\": train_loss, \"val_loss\": val_loss})\n",
+    "            print(f\"Epoch {epoch+1} - avg_train_loss: {train_loss:.4f}  avg_val_loss: {val_loss:.4f}  time: {elapsed:.0f}s\")\n",
+    "\n",
+    "            if min_loss > val_loss:\n",
+    "                min_loss = val_loss\n",
+    "                print(\"Best Score : saving model.\")\n",
+    "                os.makedirs(\"../artifacts\", exist_ok=True)\n",
+    "                model.save(\"../artifacts\")\n",
+    "                print(f\"\\nSaved Best Model Score: {min_loss:.4f}\\n\\n\")\n",
+    "\n",
+    "        wandb.save(\"../artifacts/model.pt\")\n",
+    "        torch.cuda.empty_cache()\n",
+    "        gc.collect()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "tIBl_kvssswk",
+    "outputId": "4bff057f-a3a7-45ca-f3c2-5b5fbd15bab5",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "# Train/Tune\n",
+    "if not Cfg.wandb_sweep:\n",
+    "    train_loop()\n",
+    "else:\n",
+    "    wandb.agent(sweep_id, train_loop, count=10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "qxXv-FaNNtKJ"
+   },
+   "source": [
+    "## Inference"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {
+    "id": "SHCGJBhABesw",
+    "outputId": "a62f9ff6-d47d-46d0-f971-cfeb76adc6d5",
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['roberta.pooler.dense.weight', 'roberta.pooler.dense.bias']\n",
+      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "CustomModel(\n",
+       "  (model): RobertaModel(\n",
+       "    (embeddings): RobertaEmbeddings(\n",
+       "      (word_embeddings): Embedding(50265, 768, padding_idx=1)\n",
+       "      (position_embeddings): Embedding(514, 768, padding_idx=1)\n",
+       "      (token_type_embeddings): Embedding(1, 768)\n",
+       "      (LayerNorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
+       "      (dropout): Dropout(p=0.1, inplace=False)\n",
+       "    )\n",
+       "    (encoder): RobertaEncoder(\n",
+       "      (layer): ModuleList(\n",
+       "        (0-11): 12 x RobertaLayer(\n",
+       "          (attention): RobertaAttention(\n",
+       "            (self): RobertaSelfAttention(\n",
+       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
+       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
+       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
+       "              (dropout): Dropout(p=0.1, inplace=False)\n",
+       "            )\n",
+       "            (output): RobertaSelfOutput(\n",
+       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
+       "              (LayerNorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
+       "              (dropout): Dropout(p=0.1, inplace=False)\n",
+       "            )\n",
+       "          )\n",
+       "          (intermediate): RobertaIntermediate(\n",
+       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
+       "            (intermediate_act_fn): GELUActivation()\n",
+       "          )\n",
+       "          (output): RobertaOutput(\n",
+       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
+       "            (LayerNorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
+       "            (dropout): Dropout(p=0.1, inplace=False)\n",
+       "          )\n",
+       "        )\n",
+       "      )\n",
+       "    )\n",
+       "    (pooler): RobertaPooler(\n",
+       "      (dense): Linear(in_features=768, out_features=768, bias=True)\n",
+       "      (activation): Tanh()\n",
+       "    )\n",
+       "  )\n",
+       "  (dropout): Dropout(p=0.0, inplace=False)\n",
+       "  (fc): Linear(in_features=768, out_features=7, bias=True)\n",
+       ")"
+      ]
+     },
+     "execution_count": 34,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model = CustomModel(num_classes=7)\n",
+    "model.load_state_dict(torch.load(\"../artifacts/model.pt\", map_location=torch.device(\"cpu\")))\n",
+    "model.to(device)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "BjupBkbOCI22",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "def test_step(test_loader: DataLoader, model, num_classes: int) -> Tuple[np.ndarray, np.ndarray]:\n",
+    "    \"\"\"Test step.\"\"\"\n",
+    "    model.eval()\n",
+    "    y_trues, y_preds = [], []\n",
+    "    with torch.inference_mode():\n",
+    "        for step, (inputs, labels) in tqdm(enumerate(test_loader)):\n",
+    "            inputs = collate(inputs)\n",
+    "            for k, v in inputs.items():\n",
+    "                inputs[k] = v.to(device)\n",
+    "            labels = labels.to(device)\n",
+    "            y_pred = model(inputs)\n",
+    "            y_trues.extend(labels.cpu().numpy())\n",
+    "            y_preds.extend(torch.argmax(y_pred, dim=1).cpu().numpy())\n",
+    "    return np.vstack(y_trues), np.vstack(y_preds)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "QimlSstFDsbJ",
+    "outputId": "8c903f7f-eddd-417c-c85e-4d57a4206501",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "test_dataset = NewsDataset(val_ds)\n",
+    "test_loader = DataLoader(test_dataset, batch_size=Cfg.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)\n",
+    "\n",
+    "y_true, y_pred = test_step(test_loader, model, 7)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "CLz_GuoeEEgz",
+    "outputId": "8870b27c-46a6-4695-e526-e5c1e778f96a",
+    "trusted": true
+   },
+   "outputs": [],
+   "source": [
+    "print(\n",
+    "    f'Precision: {precision_score(y_true, y_pred, average=\"weighted\")} \\n Recall: {recall_score(y_true, y_pred, average=\"weighted\")} \\n F1: {f1_score(y_true, y_pred, average=\"weighted\")} \\n Accuracy: {accuracy_score(y_true, y_pred)}'\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "j_D8B0aNOBiI"
+   },
+   "source": [
+    "## Prediction on single sample"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "val_ds"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 36,
+   "metadata": {
+    "id": "-wU3xnKkH0Tt",
+    "outputId": "171245e5-4844-4e71-82b7-a0f3e97879e7",
+    "trusted": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Ground Truth: 5, Sports\n",
+      "Predicted: 5, Sports\n",
+      "Predicted Probabilities: [9.8119999e-05 1.0613000e-04 7.7200002e-06 3.2520002e-05 8.3100003e-06\n",
+      " 9.9973667e-01 1.0560000e-05]\n"
+     ]
+    }
+   ],
+   "source": [
+    "sample = 2\n",
+    "sample_input = prepare_input(tokenizer, val_ds[\"Text\"].values[sample])\n",
+    "\n",
+    "cats = df[\"Category\"].unique().tolist()\n",
+    "num_classes = len(cats)\n",
+    "class_to_index = {tag: i for i, tag in enumerate(cats)}\n",
+    "index_to_class = {v: k for k, v in class_to_index.items()}\n",
+    "\n",
+    "label = val_ds[\"Category\"].values[sample]\n",
+    "input_ids = torch.unsqueeze(sample_input[\"input_ids\"], 0).to(device)\n",
+    "attention_masks = torch.unsqueeze(sample_input[\"attention_mask\"], 0).to(device)\n",
+    "test_sample = dict(input_ids=input_ids, attention_mask=attention_masks)\n",
+    "\n",
+    "with torch.no_grad():\n",
+    "    y_pred_test_sample = model.predict_proba(test_sample)\n",
+    "    print(f\"Ground Truth: {label}, {index_to_class[int(label)]}\")\n",
+    "    print(f\"Predicted: {np.argmax(y_pred_test_sample)}, {index_to_class[int(np.argmax(y_pred_test_sample))]}\")\n",
+    "    print(f\"Predicted Probabilities: {np.round(y_pred_test_sample, 8)[0]}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
requirements.txt
ADDED
@@ -0,0 +1,34 @@
+aiosignal==1.3.1
+attrs==23.1.0
+certifi==2023.7.22
+charset-normalizer==3.3.1
+click==8.1.7
+colorama==0.4.6
+contourpy==1.1.1
+cycler==0.12.1
+filelock==3.12.4
+fonttools==4.43.1
+frozenlist==1.4.0
+idna==3.4
+jsonschema==4.19.1
+jsonschema-specifications==2023.7.1
+kiwisolver==1.4.5
+matplotlib==3.8.0
+msgpack==1.0.7
+numpy==1.26.1
+packaging==23.2
+pandas==2.1.2
+Pillow==10.1.0
+protobuf==4.24.4
+pyparsing==3.1.1
+python-dateutil==2.8.2
+pytz==2023.3.post1
+PyYAML==6.0.1
+ray==2.7.1
+referencing==0.30.2
+requests==2.31.0
+rpds-py==0.10.6
+seaborn==0.13.0
+six==1.16.0
+tzdata==2023.3
+urllib3==2.0.7
setup.py
ADDED
@@ -0,0 +1,23 @@
+from typing import List
+
+from setuptools import find_packages, setup
+
+
+def get_requirements(file_path: str) -> List[str]:
+    """Get the requirements/dependencies (packages) in a list."""
+    with open(file_path) as f:
+        lines = f.readlines()
+        requirements = [line.rstrip("\n") for line in lines]
+
+    return requirements
+
+
+setup(
+    name="NewsClassifier",
+    version="1.0",
+    author="ManishW",
+    author_email="[email protected]",
+    description="",
+    packages=find_packages(),
+    install_requires=get_requirements("requirements.txt"),
+)