{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"title: 09 Continuous Bag of Words (CBOW) Text Classifier\n",
"description: Build a continuous bag of words text classifier.\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "FXO-zuq0o5cU"
},
"source": [
"# Continuous Bag of Words (CBOW) Text Classifier\n",
"\n",
"The code below implements a continuous bag of words text classifier.\n",
"- We tokenize the text, create a vocabulary and encode each piece of text in the dataset\n",
"- The lookup allows for extracting embeddings for each tokenized input\n",
"- The embedding vectors are added together\n",
"- The resulting vector is multiplied with a weight matrix, which is then added a bias vector; this results in scores\n",
"- The scores are applied a softmax to generate probabilities which are used for the final classification\n",
"\n",
"The code used in this notebook was inspired by code from the [official repo](https://github.com/neubig/nn4nlp-code) used in the [CMU Neural Networks for NLP class](http://www.phontron.com/class/nn4nlp2021/schedule.html) by [Graham Neubig](http://www.phontron.com/index.php). \n",
"\n",
""
]
},
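{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before building the full pipeline, the cell below gives a minimal, self-contained sketch of the scoring steps listed above, using a made-up four-word vocabulary and made-up layer sizes (purely for illustration; none of these names appear in the dataset used later):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# toy walkthrough of the CBOW scoring steps (hypothetical vocabulary and sizes)\n",
"import torch\n",
"\n",
"toy_vocab = {\"the\": 0, \"movie\": 1, \"was\": 2, \"great\": 3}  # hypothetical vocabulary\n",
"toy_embedding = torch.nn.Embedding(len(toy_vocab), 4)  # 4-dimensional embeddings\n",
"toy_linear = torch.nn.Linear(4, 2)  # 2 hypothetical classes\n",
"\n",
"# encode the sentence as word indices, then look up and sum the embeddings\n",
"token_ids = torch.tensor([toy_vocab[w] for w in \"the movie was great\".split()])\n",
"summed = toy_embedding(token_ids).sum(dim=0)\n",
"\n",
"scores = toy_linear(summed)  # multiply by weight matrix, add bias -> scores\n",
"probs = torch.softmax(scores, dim=0)  # softmax -> class probabilities\n",
"print(probs)"
]
},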
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ORP_xNj9o3md"
},
"outputs": [],
"source": [
"import torch\n",
"import random\n",
"import torch.nn as nn"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "NO7P5X0tqr-N"
},
"outputs": [],
"source": [
"%%capture\n",
"\n",
"# download the files\n",
"!wget https://raw.githubusercontent.com/neubig/nn4nlp-code/master/data/classes/dev.txt\n",
"!wget https://raw.githubusercontent.com/neubig/nn4nlp-code/master/data/classes/test.txt\n",
"!wget https://raw.githubusercontent.com/neubig/nn4nlp-code/master/data/classes/train.txt\n",
"\n",
"# create the data folders\n",
"!mkdir data data/classes\n",
"!cp dev.txt data/classes\n",
"!cp test.txt data/classes\n",
"!cp train.txt data/classes"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Wa83HOUIq5EP"
},
"source": [
"## Read and Process Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "D31E3u_UqwTc"
},
"outputs": [],
"source": [
"# function to read in data, process each line and split columns by \" ||| \"\n",
"def read_data(filename):\n",
" data = []\n",
" with open(filename, 'r') as f:\n",
" for line in f:\n",
" line = line.lower().strip()\n",
" line = line.split(' ||| ')\n",
" data.append(line)\n",
" return data\n",
"\n",
"train_data = read_data('data/classes/train.txt')\n",
"test_data = read_data('data/classes/test.txt')\n",
"\n",
"# creating the word and tag indices\n",
"word_to_index = {}\n",
"word_to_index[\"\"] = len(word_to_index) # add to dictionary\n",
"tag_to_index = {}\n",
"\n",
"# create word to index dictionary and tag to index dictionary from data\n",
"def create_dict(data, check_unk=False):\n",
" for line in data:\n",
" for word in line[1].split(\" \"):\n",
" if check_unk == False:\n",
" if word not in word_to_index:\n",
" word_to_index[word] = len(word_to_index)\n",
" else:\n",
" if word not in word_to_index:\n",
" word_to_index[word] = word_to_index[\"\"]\n",
"\n",
" if line[0] not in tag_to_index:\n",
" tag_to_index[line[0]] = len(tag_to_index)\n",
"\n",
"create_dict(train_data)\n",
"create_dict(test_data, check_unk=True)\n",
"\n",
"# create word and tag tensors from data\n",
"def create_tensor(data):\n",
" for line in data:\n",
" yield([word_to_index[word] for word in line[1].split(\" \")], tag_to_index[line[0]])\n",
"\n",
"train_data = list(create_tensor(train_data))\n",
"test_data = list(create_tensor(test_data))\n",
"\n",
"number_of_words = len(word_to_index)\n",
"number_of_tags = len(tag_to_index)"
]
},
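{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (assuming the cells above have run), we can inspect the vocabulary size, the tag count, and one encoded example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# inspect the processed data: vocabulary size, tag count, and one encoded example\n",
"print(f\"number of words: {number_of_words} | number of tags: {number_of_tags}\")\n",
"print(train_data[0])  # ([word indices, ...], tag index)"
]
},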
{
"cell_type": "markdown",
"metadata": {
"id": "cNsjv5misKIi"
},
"source": [
"## Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "7JPQ9OUZrC6z"
},
"outputs": [],
"source": [
"# cpu or gpu\n",
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
"\n",
"# create a simple neural network with embedding layer, bias, and xavier initialization\n",
"class CBoW(torch.nn.Module):\n",
" def __init__(self, nwords, ntags, emb_size):\n",
" super(CBoW, self).__init__()\n",
"\n",
" # layers\n",
" self.embedding = torch.nn.Embedding(nwords, emb_size)\n",
" self.linear = torch.nn.Linear(emb_size, ntags)\n",
"\n",
" # use xavier initialization for weights\n",
" nn.init.xavier_uniform_(self.embedding.weight)\n",
" nn.init.xavier_uniform_(self.linear.weight)\n",
"\n",
" def forward(self, x):\n",
" emb = self.embedding(x) # seq x emb_size\n",
" out = torch.sum(emb, dim=0) # emb_size\n",
" out = out.view(1, -1) # reshape to (1, emb_size)\n",
" out = self.linear(out) # 1 x ntags\n",
" return out\n",
"\n",
"EMB_SIZE = 64\n",
"model = CBoW(number_of_words, number_of_tags, EMB_SIZE)\n",
"criterion = torch.nn.CrossEntropyLoss()\n",
"optimizer = torch.optim.Adam(model.parameters())\n",
"type = torch.LongTensor\n",
"\n",
"if torch.cuda.is_available():\n",
" model.to(device)\n",
" type = torch.cuda.LongTensor"
]
},
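{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before training, the cell below runs a single forward pass on the first training example as a sketch of how the model is used: it returns raw scores of shape `(1, number_of_tags)`, and applying a softmax turns them into the class probabilities described earlier (during training, `CrossEntropyLoss` applies the softmax internally):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# one forward pass: scores -> softmax -> probabilities\n",
"sample_sentence, sample_tag = train_data[0]\n",
"sample_input = torch.tensor(sample_sentence).type(tensor_type)\n",
"with torch.no_grad():\n",
"    sample_scores = model(sample_input)  # shape: (1, number_of_tags)\n",
"    sample_probs = torch.softmax(sample_scores, dim=1)\n",
"print(sample_scores.shape)\n",
"print(sample_probs)"
]
},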
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "aycOgcVssgZI",
"outputId": "efe7bc92-5699-43d4-b382-54fd24d06134"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch: 1 | train loss/sent: 1.4089 | train accuracy: 0.3826 | test accuracy: 0.4149\n",
"epoch: 2 | train loss/sent: 0.9089 | train accuracy: 0.6358 | test accuracy: 0.4104\n",
"epoch: 3 | train loss/sent: 0.5298 | train accuracy: 0.8076 | test accuracy: 0.3837\n",
"epoch: 4 | train loss/sent: 0.3289 | train accuracy: 0.8864 | test accuracy: 0.3670\n",
"epoch: 5 | train loss/sent: 0.2179 | train accuracy: 0.9254 | test accuracy: 0.3851\n",
"epoch: 6 | train loss/sent: 0.1529 | train accuracy: 0.9467 | test accuracy: 0.3774\n",
"epoch: 7 | train loss/sent: 0.1131 | train accuracy: 0.9594 | test accuracy: 0.3774\n",
"epoch: 8 | train loss/sent: 0.0835 | train accuracy: 0.9719 | test accuracy: 0.3643\n",
"epoch: 9 | train loss/sent: 0.0594 | train accuracy: 0.9795 | test accuracy: 0.3566\n",
"epoch: 10 | train loss/sent: 0.0477 | train accuracy: 0.9837 | test accuracy: 0.3706\n"
]
}
],
"source": [
"# perform training of the Bow model\n",
"\n",
"for epoch in range(10):\n",
" # perform training\n",
" model.train()\n",
" random.shuffle(train_data)\n",
" total_loss = 0.0\n",
" train_correct = 0\n",
" for sentence, tag in train_data:\n",
" sentence = torch.tensor(sentence).type(type)\n",
" tag = torch.tensor([tag]).type(type)\n",
" output = model(sentence)\n",
" predicted = torch.argmax(output.data.detach()).item()\n",
" \n",
" loss = criterion(output, tag)\n",
" total_loss += loss.item()\n",
"\n",
" optimizer.zero_grad()\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" if predicted == tag: train_correct+=1\n",
"\n",
" # perform testing of the model\n",
" model.eval()\n",
" test_correct = 0\n",
" for sentence, tag in test_data:\n",
" sentence = torch.tensor(sentence).type(type)\n",
" output = model(sentence)\n",
" predicted = torch.argmax(output.data.detach()).item()\n",
" if predicted == tag: test_correct += 1\n",
" \n",
" # print model performance results\n",
" log = f'epoch: {epoch+1} | ' \\\n",
" f'train loss/sent: {total_loss/len(train_data):.4f} | ' \\\n",
" f'train accuracy: {train_correct/len(train_data):.4f} | ' \\\n",
" f'test accuracy: {test_correct/len(test_data):.4f}'\n",
" print(log)"
]
}
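,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, a short sketch of inference with the trained model on a single test sentence; `index_to_tag`, built here by inverting `tag_to_index`, is introduced only for this illustration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# predict the tag of one test example with the trained model\n",
"index_to_tag = {i: t for t, i in tag_to_index.items()}  # invert the tag dictionary\n",
"\n",
"model.eval()\n",
"sentence, tag = test_data[0]\n",
"with torch.no_grad():\n",
"    output = model(torch.tensor(sentence).type(tensor_type))\n",
"predicted = torch.argmax(output, dim=1).item()\n",
"print(f\"predicted: {index_to_tag[predicted]} | actual: {index_to_tag[tag]}\")"
]
}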
],
"metadata": {
"accelerator": "GPU",
"colab": {
"name": "cbow.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 1
}