# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import pickle

import datasets
import evaluate
import torch
import torch.nn as nn
from hierarchicalsoftmax import HierarchicalSoftmaxLoss
_CITATION = """\
@InProceedings{huggingface:module,
title = {Hierarchical Softmax Loss},
authors = {Danieldux},
year = {2023}
}
"""
_DESCRIPTION = """\
Hierarchical loss module for classification tasks with hierarchical label
structures, such as ISCO occupation codes. It combines a per-level
cross-entropy (layer) loss with a dependence loss that penalises predictions
violating the label hierarchy, and reports a flat accuracy score.
"""
_KWARGS_DESCRIPTION = """
Calculates how well the predictions match the given references.
Args:
    predictions: list of predicted class indices (int), one per example.
    references: list of reference class indices (int), one per prediction.
Returns:
    accuracy: fraction of predictions that exactly match their reference.
Examples:
    >>> hs_loss = evaluate.load("danieldux/hierarchical_softmax_loss")
    >>> results = hs_loss.compute(references=[0, 1], predictions=[0, 1])
    >>> print(results)
    {'accuracy': 1.0}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class HierarchicalISCOSoftmaxLoss(evaluate.Metric):
"""TODO: Short description of my evaluation module."""
def _info(self):
# TODO: Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features({
'predictions': datasets.Value('int64'),
'references': datasets.Value('int64'),
}),
# Homepage of the module for documentation
homepage="http://module.homepage",
# Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"]
)
    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        # TODO: Download ISCO hierarchical metadata
        pass
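    # A minimal sketch of what `_download_and_prepare` could look like once the
    # ISCO metadata is hosted somewhere. `ISCO_METADATA_URL` and `ISCO_HIERARCHY`
    # are hypothetical placeholders, not real resources; `dl_manager` is the
    # `datasets.DownloadManager` that `evaluate` passes in:
    #
    #     def _download_and_prepare(self, dl_manager):
    #         metafile_path = dl_manager.download_and_extract(ISCO_METADATA_URL)
    #         self.loss_network = HierarchicalLossNetwork(metafile_path, ISCO_HIERARCHY)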
class HierarchicalLossNetwork:
    """Logic to calculate the hierarchical loss of the model.
    """
def __init__(self, metafile_path, hierarchical_labels, device='cpu', total_level=2, alpha=1, beta=0.8, p_loss=3):
"""Param init.
"""
self.total_level = total_level
self.alpha = alpha
self.beta = beta
self.p_loss = p_loss
self.device = device
        # read_meta currently returns two levels (coarse, fine); see its TODO below.
        self.level_one_labels, self.level_two_labels = self.read_meta(metafile=metafile_path)
self.hierarchical_labels = hierarchical_labels
self.numeric_hierarchy = self.words_to_indices()
    @staticmethod
    def read_meta(metafile):
        """Read the pickled meta file and return the coarse and fine label names.
        """
        # TODO: Replace with metadata from the dataset
        with open(metafile, 'rb') as f:
            meta_data = pickle.load(f, encoding='bytes')
        fine_label_names = [t.decode('utf8') for t in meta_data[b'fine_label_names']]
        coarse_label_names = [t.decode('utf8') for t in meta_data[b'coarse_label_names']]
        return coarse_label_names, fine_label_names
    @staticmethod
    def hierarchical_softmax_loss_fn(logits: torch.Tensor, labels: torch.Tensor, root) -> torch.Tensor:
        """Compute the hierarchical softmax loss over the label tree rooted at `root`."""
        loss = HierarchicalSoftmaxLoss(root=root)
        return loss(logits, labels)
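    # Illustrative sketch (assumed API) of how a label tree for the function above
    # might be built with the `hierarchicalsoftmax` package; the node names and the
    # target encoding are hypothetical, so check the package docs before relying on this:
    #
    #     from hierarchicalsoftmax import SoftmaxNode
    #     root = SoftmaxNode("root")
    #     managers = SoftmaxNode("1", parent=root)           # ISCO major group
    #     chief_execs = SoftmaxNode("11", parent=managers)   # ISCO sub-major group
    #     # logits: shape (batch, root.layer_size); labels: target node indexes
    #     loss = HierarchicalLossNetwork.hierarchical_softmax_loss_fn(logits, labels, root)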
def words_to_indices(self):
"""Convert the classes from words to indices."""
numeric_hierarchy = {}
for k, v in self.hierarchical_labels.items():
numeric_hierarchy[self.level_one_labels.index(k)] = [self.level_two_labels.index(i) for i in v]
return numeric_hierarchy
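    # Example for words_to_indices above (illustrative labels): with level-one
    # labels ['a', 'b'], level-two labels ['aa', 'ab', 'ba', 'bb'] and
    # hierarchical_labels {'a': ['aa', 'ab'], 'b': ['ba', 'bb']}, the resulting
    # numeric_hierarchy is {0: [0, 1], 1: [2, 3]}.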
    def check_hierarchy(self, current_level, previous_level):
        """
        For each sample in the batch, check whether the class predicted at level l
        is a child of the class predicted at level l-1; return 1.0 where it is NOT.
        """
        # Use the hierarchy dictionary: mark a sample 1.0 when the current level's
        # prediction does not belong to the superclass predicted at the previous level.
        bool_tensor = [current_level[i].item() not in self.numeric_hierarchy[previous_level[i].item()] for i in range(previous_level.size()[0])]
        return torch.FloatTensor(bool_tensor).to(self.device)
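    # Continuing the example: with numeric_hierarchy {0: [0, 1], 1: [2, 3]},
    # previous_level tensor([0, 1]) and current_level tensor([1, 0]), the first
    # sample respects the hierarchy (1 is a child of 0) while the second violates
    # it (0 is not a child of 1), so check_hierarchy returns tensor([0., 1.]).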
    def calculate_lloss(self, predictions, true_labels):
        """Calculate the layer loss: the sum of per-level cross-entropy losses, scaled by alpha."""
        lloss = 0
        for l in range(self.total_level):
            lloss += nn.CrossEntropyLoss()(predictions[l], true_labels[l])
        return self.alpha * lloss
    def calculate_dloss(self, predictions, true_labels):
        """Calculate the dependence loss, which penalises hierarchy-violating predictions."""
        dloss = 0
        for l in range(1, self.total_level):
            current_lvl_pred = torch.argmax(nn.Softmax(dim=1)(predictions[l]), dim=1)
            prev_lvl_pred = torch.argmax(nn.Softmax(dim=1)(predictions[l-1]), dim=1)
            # D_l is 1 where the level-l prediction is not a child of the level-(l-1) prediction.
            D_l = self.check_hierarchy(current_lvl_pred, prev_lvl_pred)
            # l_prev / l_curr are 1 where the corresponding level's prediction is wrong.
            l_prev = torch.where(prev_lvl_pred == true_labels[l-1], torch.FloatTensor([0]).to(self.device), torch.FloatTensor([1]).to(self.device))
            l_curr = torch.where(current_lvl_pred == true_labels[l], torch.FloatTensor([0]).to(self.device), torch.FloatTensor([1]).to(self.device))
            # The per-sample penalty p_loss^(D_l*l_prev) * p_loss^(D_l*l_curr) - 1 is zero
            # whenever the hierarchy is respected (D_l = 0).
            dloss += torch.sum(torch.pow(self.p_loss, D_l*l_prev)*torch.pow(self.p_loss, D_l*l_curr) - 1)
        return self.beta * dloss
    def _compute(self, predictions, references):
        """Return the accuracy score of the predictions."""
        # Predictions and references arrive as lists of class indices (see the
        # int64 features in _info), so compare them element-wise.
        predictions = torch.as_tensor(predictions)
        references = torch.as_tensor(references)
        num_data = references.size(0)
        correct_pred = torch.sum(predictions == references)
        accuracy = correct_pred.item() / num_data
        return {
            "accuracy": accuracy,
        }
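

# The guarded block below is a minimal, hedged smoke test of HierarchicalLossNetwork;
# all label names, hierarchy entries, and logits are made up for illustration.
if __name__ == "__main__":
    import tempfile

    # Fake the pickled meta file expected by read_meta: two coarse classes
    # ('a', 'b') and four fine classes ('aa', 'ab', 'ba', 'bb').
    meta = {
        b'coarse_label_names': [b'a', b'b'],
        b'fine_label_names': [b'aa', b'ab', b'ba', b'bb'],
    }
    with tempfile.NamedTemporaryFile(suffix='.pkl', delete=False) as f:
        pickle.dump(meta, f)
        metafile_path = f.name

    hln = HierarchicalLossNetwork(
        metafile_path=metafile_path,
        hierarchical_labels={'a': ['aa', 'ab'], 'b': ['ba', 'bb']},
    )

    # A batch of two samples: level-one logits over 2 classes, level-two logits
    # over 4 classes, and the true label index at each level.
    predictions = [torch.randn(2, 2), torch.randn(2, 4)]
    true_labels = [torch.tensor([0, 1]), torch.tensor([1, 2])]

    total_loss = hln.calculate_lloss(predictions, true_labels) + hln.calculate_dloss(predictions, true_labels)
    print(f"total hierarchical loss: {total_loss.item():.4f}")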