import os
import time
import string
import argparse
import re
import validators
import sys

import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from nltk.metrics.distance import edit_distance
import pickle

from utils import CTCLabelConverter, AttnLabelConverter, Averager, TokenLabelConverter
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model, STRScore
from utils import get_args, AccuracyMeter
import matplotlib.pyplot as plt
import settings

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def getPredAndConf(opt, model, scoring, image, converter, labels):
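    """Run a forward pass of the recognizer and return (predicted string, confidence).

    The decoding path is selected by settings.MODEL ("vitstr", "trba", "srn", or
    "parseq"), since each architecture exposes a different output layout. `scoring`
    is the STRScore module that reduces the raw predictions to a confidence value.
    """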
    batch_size = image.size(0)
    length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
    text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
    if not opt.Transformer:
        text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)

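    # ViTSTR: greedy top-1 decoding of the token logits; drop the first ([GO]) token and truncate at '[s]'.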
    if settings.MODEL == "vitstr":
        target = converter.encode(labels)
        preds = model(image, text=target, seqlen=converter.batch_max_length)

        confScore = scoring(preds)
        confScore = confScore.detach().cpu().numpy()

        _, preds_index = preds.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, converter.batch_max_length)

        length_for_pred = torch.IntTensor([converter.batch_max_length - 1] * batch_size).to(device)
        preds_str = converter.decode(preds_index[:, 1:], length_for_pred)
        preds_str = preds_str[0]
        preds_str = preds_str[:preds_str.find('[s]')]

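    # TRBA: attention-decoder output; argmax over the character dimension, then truncate at '[s]'.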
    elif settings.MODEL == "trba":
        preds = model(image)
        confScore = scoring(preds)
        _, preds_index = preds.max(2)
        preds_str = converter.decode(preds_index, length_for_pred)
        # print("preds_str: ", preds_str) # ['ronaldo[s]
        preds_str = preds_str[0]
        preds_str = preds_str[:preds_str.find('[s]')]

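    # SRN: the model returns a tuple of outputs; the third element (preds[2]) holds the logits used for decoding.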
    elif settings.MODEL == "srn":
        target = converter.encode(labels)
        preds = model(image, None)

        _, preds_index = preds[2].max(2)

        confScore = scoring(preds)
        confScore = confScore.detach().cpu().numpy()

        length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
        # length_for_pred = torch.IntTensor([converter.batch_max_length - 1] * batch_size).to(device)
        preds_str = converter.decode(preds_index, length_for_pred)
        preds_str = preds_str[0]
        # preds_str = preds_str[:preds_str.find('[s]')]
        preds = preds[2]

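    # PARSeq: decoding and per-sequence confidence come from the model's own tokenizer.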
    elif settings.MODEL == "parseq":
        target = converter.encode(labels)
        preds = model(image)

        predStr, confidence = model.tokenizer.decode(preds)

        confScore = scoring(preds)
        confScore = confScore.detach().cpu().numpy()

        # _, preds_index = preds.topk(1, dim=-1, largest=True, sorted=True)
        # preds_index = preds_index.view(-1, converter.batch_max_length)
        #
        # length_for_pred = torch.IntTensor([converter.batch_max_length - 1] * batch_size).to(device)
        # preds_str = converter.decode(preds_index[:, 0:], length_for_pred)
        preds_str = predStr[0]
        # preds_str = preds_str[:preds_str.find('[s]')]
        # pred = pred[:pred_EOS]
    return preds_str, confScore
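
# Hedged usage sketch (not part of the original file): illustrates how getPredAndConf
# might be wired up for the ViTSTR setting. The Model/STRScore constructor arguments,
# the get_args() options, and the demo tensor shape below are assumptions, not the
# repository's documented API.
if __name__ == '__main__':
    opt = get_args()                                    # assumed CLI/option parser from utils
    converter = TokenLabelConverter(opt)                # assumed to match settings.MODEL == "vitstr"
    model = Model(opt).to(device).eval()
    scoring = STRScore(opt, converter, device)          # hypothetical positional arguments
    demo_image = torch.rand(1, opt.input_channel, opt.imgH, opt.imgW, device=device)  # assumed opt fields
    with torch.no_grad():
        pred_str, conf = getPredAndConf(opt, model, scoring, demo_image, converter, ['demo'])
    print(pred_str, conf)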