markytools committed
Commit 7978529 · 1 Parent(s): d61b9c7

updated app py

Files changed (8)
  1. .gitignore +5 -0
  2. app.py +177 -2
  3. model.py +5 -0
  4. requirements.txt +175 -0
  5. settings.py +1 -1
  6. str_exp_demo.py +2 -2
  7. str_exp_demo_huggingface.py +513 -0
  8. utils.py +6 -7
.gitignore CHANGED
@@ -21,12 +21,17 @@
  *.sh
  **/__pycache__
  workdir/
+ datasets/
  .remote-sync.json
  *.png
+ demo_image_output/
  pretrained/
+ attributionData/
  attributionImgs/
  attributionImgsOld/
  attrSelectivityOld/
+ pretrained.zip
+ datasets.zip

  ### Linux ###
  *~
app.py CHANGED
@@ -1,4 +1,179 @@
  import streamlit as st
+ from PIL import Image
+ import settings
+ import captum
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import torch.backends.cudnn as cudnn
+ from utils import get_args
+ from utils import CTCLabelConverter, AttnLabelConverter, Averager, TokenLabelConverter
+ import string
+ import time
+ import sys
+ from dataset import hierarchical_dataset, AlignCollate
+ import validators
+ from model import Model, STRScore
+ from PIL import Image
+ from lime.wrappers.scikit_image import SegmentationAlgorithm
+ from captum._utils.models.linear_model import SkLearnLinearModel, SkLearnRidge
+ import random
+ import os
+ from skimage.color import gray2rgb
+ import pickle
+ from train_shap_corr import getPredAndConf
+ import re
+ from captum_test import acquire_average_auc, saveAttrData
+ import copy
+ from skimage.color import gray2rgb
+ from matplotlib import pyplot as plt
+ from torchvision import transforms

- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+ device = torch.device('cpu')
+ opt = get_args(is_train=False)
+
+ """ vocab / character number configuration """
+ if opt.sensitive:
+     opt.character = string.printable[:-6]  # same with ASTER setting (use 94 char).
+
+ cudnn.benchmark = True
+ cudnn.deterministic = True
+ # opt.num_gpu = torch.cuda.device_count()
+
+ # combineBestDataXAI(opt)
+ # acquire_average_auc(opt)
+ # acquireSingleCharAttrAve(opt)
+ modelName = "parseq"
+ opt.modelName = modelName
+ # opt.eval_data = "datasets/data_lmdb_release/evaluation"
+
+ if modelName=="vitstr":
+     opt.benchmark_all_eval = True
+     opt.Transformation = "None"
+     opt.FeatureExtraction = "None"
+     opt.SequenceModeling = "None"
+     opt.Prediction = "None"
+     opt.Transformer = True
+     opt.sensitive = True
+     opt.imgH = 224
+     opt.imgW = 224
+     opt.data_filtering_off = True
+     opt.TransformerModel= "vitstr_base_patch16_224"
+     opt.saved_model = "pretrained/vitstr_base_patch16_224_aug.pth"
+     opt.batch_size = 1
+     opt.workers = 0
+     opt.scorer = "mean"
+     opt.blackbg = True
+ elif modelName=="parseq":
+     opt.benchmark_all_eval = True
+     opt.Transformation = "None"
+     opt.FeatureExtraction = "None"
+     opt.SequenceModeling = "None"
+     opt.Prediction = "None"
+     opt.Transformer = True
+     opt.sensitive = True
+     opt.imgH = 32
+     opt.imgW = 128
+     opt.data_filtering_off = True
+     opt.batch_size = 1
+     opt.workers = 0
+     opt.scorer = "mean"
+     opt.blackbg = True
+
+ # x = st.slider('Select a value')
+ # st.write(x, 'squared is', x * x)
+
+ image = Image.open('demo_image/demo_ballys.jpg') #Brand logo image (optional)
+ #Create two columns with different width
+ col1, col2 = st.columns( [0.8, 0.2])
+ with col1:  # To display the header text using css style
+     st.markdown(""" <style> .font {
+     font-size:35px ; font-family: 'Cooper Black'; color: #FF9633;}
+     </style> """, unsafe_allow_html=True)
+     st.markdown('<p class="font">Upload your photo here...</p>', unsafe_allow_html=True)
+ with col2:  # To display brand logo
+     st.image(image, width=150)
+
+ uploaded_file = st.file_uploader("Choose a file", type=["png", "jpg"])
+ if uploaded_file is not None:
+     # To read file as bytes:
+     bytes_data = uploaded_file.getvalue()
+     pilImg = Image.open(uploaded_file)
+
+     orig_img_tensors = transforms.ToTensor()(pilImg).unsqueeze(0)
+     img1 = orig_img_tensors.to(device)
+     # image_tensors = ((torch.clone(orig_img_tensors) + 1.0) / 2.0) * 255.0
+     image_tensors = torch.mean(orig_img_tensors, dim=1).unsqueeze(0).unsqueeze(0)
+     imgDataDict = {}
+     img_numpy = image_tensors.cpu().detach().numpy()[0] ### Need to set batch size to 1 only
+     if img_numpy.shape[0] == 1:
+         img_numpy = gray2rgb(img_numpy[0])
+     # print("img_numpy shape: ", img_numpy.shape) # (1, 32, 128, 3)
+     segmOutput = segmentation_fn(img_numpy[0])
+
+     results_dict = {}
+     aveAttr = []
+     aveAttr_charContrib = []
+     target = converter.encode([labels])
+
+     # labels: RONALDO
+     segmDataNP = segmOutput
+     img1.requires_grad = True
+     bgImg = torch.zeros(img1.shape).to(device)
+
+     # preds = model(img1, seqlen=converter.batch_max_length)
+     input = img1
+     origImgNP = torch.clone(orig_img_tensors).detach().cpu().numpy()[0][0] # (1, 1, 224, 224)
+     origImgNP = gray2rgb(origImgNP)
+     charOffset = 0
+     img1 = transforms.Normalize(0.5, 0.5)(img1) # Between -1 to 1
+     target = converter.encode([labels])
+
+     ### Local explanations only
+     collectedAttributions = []
+     for charIdx in range(0, len(labels)):
+         scoring_singlechar.setSingleCharOutput(charIdx + charOffset)
+         gtClassNum = target[0][charIdx + charOffset]
+
+         gs = GradientShap(super_pixel_model_singlechar)
+         baseline_dist = torch.zeros((1, 3, opt.imgH, opt.imgW))
+         baseline_dist = baseline_dist.to(device)
+         attributions = gs.attribute(input, baselines=baseline_dist, target=0)
+         collectedAttributions.append(attributions)
+     aveAttributions = torch.mean(torch.cat(collectedAttributions,dim=0), dim=0).unsqueeze(0)
+     # if not torch.isnan(aveAttributions).any():
+     # rankedAttr = rankedAttributionsBySegm(aveAttributions, segmDataNP)
+     # rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
+     # rankedAttr = gray2rgb(rankedAttr)
+     # mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
+     # mplotfig.savefig(outputDir + '{}_shapley_l.png'.format(nameNoExt))
+     # mplotfig.clear()
+     # plt.close(mplotfig)
+
+     ### Local Sampling
+     gs = GradientShap(super_pixel_model)
+     baseline_dist = torch.zeros((1, 3, opt.imgH, opt.imgW))
+     baseline_dist = baseline_dist.to(device)
+     attributions = gs.attribute(input, baselines=baseline_dist, target=0)
+     # if not torch.isnan(attributions).any():
+     # collectedAttributions.append(attributions)
+     # rankedAttr = rankedAttributionsBySegm(attributions, segmDataNP)
+     # rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
+     # rankedAttr = gray2rgb(rankedAttr)
+     # mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
+     # mplotfig.savefig(outputDir + '{}_shapley.png'.format(nameNoExt))
+     # mplotfig.clear()
+     # plt.close(mplotfig)
+
+     ### Global + Local context
+     aveAttributions = torch.mean(torch.cat(collectedAttributions,dim=0), dim=0).unsqueeze(0)
+     if not torch.isnan(aveAttributions).any():
+         rankedAttr = rankedAttributionsBySegm(aveAttributions, segmDataNP)
+         rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
+         rankedAttr = gray2rgb(rankedAttr)
+         mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
+         fig = mplotfig.figure(figsize=(8,8))
+         st.pyplot(fig)
+         # mplotfig.savefig(outputDir + '{}_shapley_gl.png'.format(nameNoExt))
+         # mplotfig.clear()
+         # plt.close(mplotfig)
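For context on the block above: the rewritten app.py runs the uploaded image through Captum's GradientShap once per predicted character and averages the maps before plotting. Below is a minimal, self-contained sketch of that `attribute` call pattern; `ToyNet` and the tensor sizes are stand-ins for the repo's `super_pixel_model` and image shape, not part of the commit.

```python
# Hedged sketch: GradientShap usage mirroring app.py, on a toy classifier so it runs standalone.
import torch
import torch.nn as nn
from captum.attr import GradientShap

class ToyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 128, 10))

    def forward(self, x):
        return self.net(x)

model = ToyNet().eval()
inp = torch.rand(1, 3, 32, 128)             # stands in for the uploaded image tensor
baseline_dist = torch.zeros(1, 3, 32, 128)  # all-black baseline, as in the block above

gs = GradientShap(model)
attributions = gs.attribute(inp, baselines=baseline_dist, target=0)
print(attributions.shape)                   # same shape as the input: torch.Size([1, 3, 128, 32]) -> here (1, 3, 32, 128)
```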
model.py CHANGED
@@ -33,8 +33,11 @@ import settings
  class STRScore(nn.Module):
      def __init__(self, opt, converter, device, gtStr="", enableSingleCharAttrAve=False, model=None):
          super(STRScore, self).__init__()
+         if opt.modelName:
+             settings.MODEL = opt.modelName
          self.enableSingleCharAttrAve = enableSingleCharAttrAve
          self.singleChar = -1
+         self.recentlyPredStr = None
          self.opt = opt
          self.converter = converter
          self.device = device
@@ -75,6 +78,8 @@ class STRScore(nn.Module):
              preds_str = self.converter.decode(preds_index[:, 1:], length_for_pred)
          elif settings.MODEL == 'parseq':
              preds_str, confidence = self.model.tokenizer.decode(preds)
+             self.recentlyPredStr = preds_str[-1]
+             # print("preds_str: ", preds_str)
              # print("preds_str: ", preds_str)
          else:
              preds = preds[:, :text_for_loss_length, :]
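The model.py hunks do two things: `opt.modelName` now overrides `settings.MODEL` when the scorer is built, and the wrapper caches the most recent PARSeq decode in `self.recentlyPredStr` so callers can read the predicted text back after a forward pass. A toy sketch of that caching pattern is below; `FakeTokenizer` is a stand-in, not PARSeq's real tokenizer API.

```python
# Hedged illustration of the "remember the last decoded string" pattern added to STRScore.
import torch
import torch.nn as nn

class FakeTokenizer:
    def decode(self, preds):
        # Returns (decoded strings, confidence), loosely mimicking a tokenizer's decode output.
        return ["RONALDO"], torch.ones(1)

class ScoreWrapper(nn.Module):
    def __init__(self, tokenizer):
        super().__init__()
        self.tokenizer = tokenizer
        self.recentlyPredStr = None            # mirrors the new STRScore.recentlyPredStr field

    def forward(self, preds):
        preds_str, confidence = self.tokenizer.decode(preds)
        self.recentlyPredStr = preds_str[-1]   # cache the latest decoded text for the UI
        return preds.softmax(-1).max(-1).values.mean(-1, keepdim=True)

wrapper = ScoreWrapper(FakeTokenizer())
_ = wrapper(torch.rand(1, 26, 95))             # (batch, sequence length, vocab) stand-in logits
print(wrapper.recentlyPredStr)                 # RONALDO
```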
requirements.txt ADDED
@@ -0,0 +1,175 @@
1
+ absl-py==1.2.0
2
+ aiohttp==3.8.1
3
+ aiosignal==1.2.0
4
+ anyio==3.5.0
5
+ argon2-cffi==21.3.0
6
+ argon2-cffi-bindings==21.2.0
7
+ asttokens==2.0.5
8
+ async-timeout==4.0.2
9
+ attrs==21.4.0
10
+ Babel==2.9.1
11
+ backcall==0.2.0
12
+ beautifulsoup4==4.11.1
13
+ bleach==4.1.0
14
+ blinker==1.4
15
+ Bottleneck==1.3.5
16
+ brotlipy==0.7.0
17
+ cachetools==5.2.0
18
+ certifi==2022.6.15
19
+ cffi==1.15.0
20
+ charset-normalizer==2.0.4
21
+ click==8.1.3
22
+ cloudpickle==2.0.0
23
+ colorama==0.4.5
24
+ cryptography==37.0.1
25
+ cycler==0.11.0
26
+ cytoolz==0.11.0
27
+ dask==2022.7.0
28
+ debugpy==1.5.1
29
+ decorator==5.1.1
30
+ defusedxml==0.7.1
31
+ einops==0.4.1
32
+ entrypoints==0.4
33
+ executing==0.8.3
34
+ fastjsonschema==2.15.1
35
+ fonttools==4.25.0
36
+ frozenlist==1.3.1
37
+ fsspec==2022.3.0
38
+ future==0.18.2
39
+ google-auth==2.11.0
40
+ google-auth-oauthlib==0.4.6
41
+ grpcio==1.48.1
42
+ idna==3.3
43
+ imageio==2.19.3
44
+ importlib-metadata==4.11.4
45
+ importlib-resources==5.2.0
46
+ ipykernel==6.9.1
47
+ ipython==8.4.0
48
+ ipython-genutils==0.2.0
49
+ ipywidgets==7.6.5
50
+ jedi==0.18.1
51
+ Jinja2==3.0.3
52
+ joblib==1.1.0
53
+ json5==0.9.6
54
+ jsonschema==4.4.0
55
+ jupyter==1.0.0
56
+ jupyter-client==7.2.2
57
+ jupyter-console==6.4.3
58
+ jupyter-core==4.10.0
59
+ jupyter-server==1.18.1
60
+ jupyterlab==3.4.4
61
+ jupyterlab-pygments==0.1.2
62
+ jupyterlab-server==2.12.0
63
+ jupyterlab-widgets==1.0.0
64
+ kiwisolver==1.4.2
65
+ llvmlite==0.38.1
66
+ lmdb==1.3.0
67
+ locket==1.0.0
68
+ Markdown==3.4.1
69
+ MarkupSafe==2.1.1
70
+ matplotlib==3.5.1
71
+ matplotlib-inline==0.1.2
72
+ mistune==0.8.4
73
+ mkl-fft==1.3.1
74
+ mkl-random==1.2.2
75
+ mkl-service==2.4.0
76
+ multidict==6.0.2
77
+ munkres==1.1.4
78
+ natsort==8.1.0
79
+ nb-conda-kernels==2.3.1
80
+ nbclassic==0.3.5
81
+ nbclient==0.5.13
82
+ nbconvert==6.4.4
83
+ nbformat==5.3.0
84
+ nest-asyncio==1.5.5
85
+ networkx==2.8.4
86
+ nltk==3.6.7
87
+ notebook==6.4.12
88
+ numba==0.55.2
89
+ numexpr==2.8.3
90
+ numpy==1.22.3
91
+ oauthlib==3.2.0
92
+ packaging==21.3
93
+ pandas==1.4.3
94
+ pandocfilters==1.5.0
95
+ parso==0.8.3
96
+ partd==1.2.0
97
+ pexpect==4.8.0
98
+ pickleshare==0.7.5
99
+ Pillow==9.2.0
100
+ pip==22.1.2
101
+ ply==3.11
102
+ prometheus-client==0.13.1
103
+ prompt-toolkit==3.0.20
104
+ protobuf==4.21.5
105
+ ptyprocess==0.7.0
106
+ pure-eval==0.2.2
107
+ pyasn1==0.4.8
108
+ pyasn1-modules==0.2.7
109
+ pycparser==2.21
110
+ pyDeprecate==0.3.2
111
+ Pygments==2.11.2
112
+ PyJWT==2.4.0
113
+ pyOpenSSL==22.0.0
114
+ pyparsing==3.0.4
115
+ PyQt5==5.12.3
116
+ PyQt5-sip==12.11.0
117
+ PyQtChart==5.12
118
+ PyQtWebEngine==5.12.1
119
+ pyrsistent==0.18.0
120
+ PySocks==1.7.1
121
+ python-dateutil==2.8.2
122
+ pytorch-lightning==1.6.3
123
+ pytorch-wavelets==1.3.0
124
+ pytz==2022.1
125
+ pyu2f==0.1.5
126
+ PyWavelets==1.3.0
127
+ PyYAML==6.0
128
+ pyzmq==23.2.0
129
+ qtconsole==5.3.1
130
+ QtPy==2.0.1
131
+ regex==2022.7.25
132
+ requests==2.28.1
133
+ requests-oauthlib==1.3.1
134
+ rsa==4.9
135
+ scikit-image==0.19.2
136
+ scikit-learn==1.1.1
137
+ scipy==1.7.3
138
+ Send2Trash==1.8.0
139
+ setuptools==59.5.0
140
+ sip==6.6.2
141
+ six==1.16.0
142
+ slicer==0.0.7
143
+ sniffio==1.2.0
144
+ soupsieve==2.3.1
145
+ stack-data==0.2.0
146
+ tensorboard==2.10.0
147
+ tensorboard-data-server==0.6.0
148
+ tensorboard-plugin-wit==1.8.1
149
+ terminado==0.13.1
150
+ testpath==0.6.0
151
+ threadpoolctl==2.2.0
152
+ tifffile==2020.10.1
153
+ timm==0.6.7
154
+ toml==0.10.2
155
+ toolz==0.11.2
156
+ torch==1.10.1
157
+ torch-summary==1.4.5
158
+ torchaudio==0.10.1
159
+ torchmetrics==0.9.3
160
+ torchvision==0.11.2
161
+ tornado==6.1
162
+ tqdm==4.64.0
163
+ traitlets==5.1.1
164
+ typing_extensions==4.1.1
165
+ urllib3==1.26.11
166
+ validators==0.18.2
167
+ Wand==0.6.7
168
+ wcwidth==0.2.5
169
+ webencodings==0.5.1
170
+ websocket-client==0.58.0
171
+ Werkzeug==2.2.2
172
+ wheel==0.37.1
173
+ widgetsnbextension==3.5.2
174
+ yarl==1.7.2
175
+ zipp==3.8.0
settings.py CHANGED
@@ -1,4 +1,4 @@
  ######### global settings #########
- MODEL = 'vitstr' # model arch: vitstr, parseq, srn, abinet, trba, matrn
+ MODEL = 'parseq' # model arch: vitstr, parseq, srn, abinet, trba, matrn
  SEGM_DIR = "./datasets/segmentations" # segmentation directory of the real test sets
  TARGET_DATASET = "SVTP" # 'IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867', 'IC13_857', 'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80'
str_exp_demo.py CHANGED
@@ -154,7 +154,7 @@ def acquireSelectivityHit(origImg, attributions, segmentations, model, converter
              pred = pred.lower()
              gt = gt.lower()
              alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
-             out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
+             out_of_alphanumeric_case_insensitve = f"[^{alphanumeric_case_insensitve}]"
              pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
              gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
              if pred == gt:
@@ -189,7 +189,7 @@ def acquire_selectivity_auc(opt, pkl_filename=None):
  def sampleDemo(opt):
      targetDataset = "SVTP"
      demoImgDir = "demo_image/"
-     outputDir = "/data/goo/demo_image_output/"
+     outputDir = "demo_image_output/"

      if not os.path.exists(outputDir):
          os.makedirs(outputDir)
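Both edits here are behavior-preserving: the f-string only changes quote style (the compiled pattern is identical), and the output directory becomes a path relative to the repo instead of a machine-specific one. A quick runnable check of the regex this code builds, with illustrative values that are not from the commit:

```python
# Sketch: the case-insensitive alphanumeric filtering used before comparing pred and gt.
import re

alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
single = f'[^{alphanumeric_case_insensitve}]'
double = f"[^{alphanumeric_case_insensitve}]"
assert single == double                    # only the quoting changed, not the pattern

pred, gt = "Bally's!", "BALLYS"            # hypothetical prediction / ground truth
pred = re.sub(double, '', pred.lower())    # -> "ballys"
gt = re.sub(double, '', gt.lower())        # -> "ballys"
print(pred == gt)                          # True
```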
str_exp_demo_huggingface.py ADDED
@@ -0,0 +1,513 @@
1
+ import settings
2
+ import captum
3
+ import numpy as np
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import torch.backends.cudnn as cudnn
7
+ from utils import get_args
8
+ from utils import CTCLabelConverter, AttnLabelConverter, Averager, TokenLabelConverter
9
+ import string
10
+ import time
11
+ import sys
12
+ from dataset import hierarchical_dataset, AlignCollate
13
+ import validators
14
+ from model import Model, STRScore
15
+ from PIL import Image
16
+ from lime.wrappers.scikit_image import SegmentationAlgorithm
17
+ from captum._utils.models.linear_model import SkLearnLinearModel, SkLearnRidge
18
+ import random
19
+ import os
20
+ from skimage.color import gray2rgb
21
+ import pickle
22
+ from train_shap_corr import getPredAndConf
23
+ import re
24
+ from captum_test import acquire_average_auc, saveAttrData
25
+ import copy
26
+ from skimage.color import gray2rgb
27
+ from matplotlib import pyplot as plt
28
+ from torchvision import transforms
29
+
30
+ device = torch.device('cpu')
31
+
32
+ from captum.attr import (
33
+ GradientShap,
34
+ DeepLift,
35
+ DeepLiftShap,
36
+ IntegratedGradients,
37
+ LayerConductance,
38
+ NeuronConductance,
39
+ NoiseTunnel,
40
+ Saliency,
41
+ InputXGradient,
42
+ GuidedBackprop,
43
+ Deconvolution,
44
+ GuidedGradCam,
45
+ FeatureAblation,
46
+ ShapleyValueSampling,
47
+ Lime,
48
+ KernelShap
49
+ )
50
+
51
+ from captum.metrics import (
52
+ infidelity,
53
+ sensitivity_max
54
+ )
55
+
56
+ from captum.attr._utils.visualization import visualize_image_attr
57
+
58
+ ### Acquire pixelwise attributions and replace them with ranked numbers averaged
59
+ ### across segmentation with the largest contribution having the largest number
60
+ ### and the smallest set to 1, which is the minimum number.
61
+ ### attr - original attribution
62
+ ### segm - image segmentations
63
+ def rankedAttributionsBySegm(attr, segm):
64
+ aveSegmentations, sortedDict = averageSegmentsOut(attr[0,0], segm)
65
+ totalSegm = len(sortedDict.keys()) # total segmentations
66
+ sortedKeys = [k for k, v in sorted(sortedDict.items(), key=lambda item: item[1])]
67
+ sortedKeys = sortedKeys[::-1] ### A list that should contain largest to smallest score
68
+ currentRank = totalSegm
69
+ rankedSegmImg = torch.clone(attr)
70
+ for totalSegToHide in range(0, len(sortedKeys)):
71
+ currentSegmentToHide = sortedKeys[totalSegToHide]
72
+ rankedSegmImg[0,0][segm == currentSegmentToHide] = currentRank
73
+ currentRank -= 1
74
+ return rankedSegmImg
75
+
76
+ ### Returns the mean for each segmentation having shape as the same as the input
77
+ ### This function can only one attribution image at a time
78
+ def averageSegmentsOut(attr, segments):
79
+ averagedInput = torch.clone(attr)
80
+ sortedDict = {}
81
+ for x in np.unique(segments):
82
+ segmentMean = torch.mean(attr[segments == x][:])
83
+ sortedDict[x] = float(segmentMean.detach().cpu().numpy())
84
+ averagedInput[segments == x] = segmentMean
85
+ return averagedInput, sortedDict
86
+
87
+ ### Output and save segmentations only for one dataset only
88
+ def outputSegmOnly(opt):
89
+ ### targetDataset - one dataset only, SVTP-645, CUTE80-288images
90
+ targetDataset = "CUTE80" # ['IIIT5k_3000', 'SVT', 'IC03_867', 'IC13_1015', 'IC15_2077', 'SVTP', 'CUTE80']
91
+ segmRootDir = "/home/uclpc1/Documents/STR/datasets/segmentations/224X224/{}/".format(targetDataset)
92
+
93
+ if not os.path.exists(segmRootDir):
94
+ os.makedirs(segmRootDir)
95
+
96
+ opt.eval = True
97
+ ### Only IIIT5k_3000
98
+ if opt.fast_acc:
99
+ # # To easily compute the total accuracy of our paper.
100
+ eval_data_list = [targetDataset]
101
+ else:
102
+ # The evaluation datasets, dataset order is same with Table 1 in our paper.
103
+ eval_data_list = [targetDataset]
104
+
105
+ ### Taken from LIME
106
+ segmentation_fn = SegmentationAlgorithm('quickshift', kernel_size=4,
107
+ max_dist=200, ratio=0.2,
108
+ random_seed=random.randint(0, 1000))
109
+
110
+ for eval_data in eval_data_list:
111
+ eval_data_path = os.path.join(opt.eval_data, eval_data)
112
+ AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, opt=opt)
113
+ eval_data, eval_data_log = hierarchical_dataset(root=eval_data_path, opt=opt)
114
+ evaluation_loader = torch.utils.data.DataLoader(
115
+ eval_data, batch_size=1,
116
+ shuffle=False,
117
+ num_workers=int(opt.workers),
118
+ collate_fn=AlignCollate_evaluation, pin_memory=True)
119
+ for i, (image_tensors, labels) in enumerate(evaluation_loader):
120
+ imgDataDict = {}
121
+ img_numpy = image_tensors.cpu().detach().numpy()[0] ### Need to set batch size to 1 only
122
+ if img_numpy.shape[0] == 1:
123
+ img_numpy = gray2rgb(img_numpy[0])
124
+ # print("img_numpy shape: ", img_numpy.shape) # (224,224,3)
125
+ segmOutput = segmentation_fn(img_numpy)
126
+ imgDataDict['segdata'] = segmOutput
127
+ imgDataDict['label'] = labels[0]
128
+ outputPickleFile = segmRootDir + "{}.pkl".format(i)
129
+ with open(outputPickleFile, 'wb') as f:
130
+ pickle.dump(imgDataDict, f)
131
+
132
+ def acquireSelectivityHit(origImg, attributions, segmentations, model, converter, labels, scoring):
133
+ # print("segmentations unique len: ", np.unique(segmentations))
134
+ aveSegmentations, sortedDict = averageSegmentsOut(attributions[0,0], segmentations)
135
+ sortedKeys = [k for k, v in sorted(sortedDict.items(), key=lambda item: item[1])]
136
+ sortedKeys = sortedKeys[::-1] ### A list that should contain largest to smallest score
137
+ # print("sortedDict: ", sortedDict) # {0: -5.51e-06, 1: -1.469e-05, 2: -3.06e-05,...}
138
+ # print("aveSegmentations unique len: ", np.unique(aveSegmentations))
139
+ # print("aveSegmentations device: ", aveSegmentations.device) # cuda:0
140
+ # print("aveSegmentations shape: ", aveSegmentations.shape) # (224,224)
141
+ # print("aveSegmentations: ", aveSegmentations)
142
+
143
+ n_correct = []
144
+ confidenceList = [] # First index is one feature removed, second index two features removed, and so on...
145
+ clonedImg = torch.clone(origImg)
146
+ gt = str(labels)
147
+ for totalSegToHide in range(0, len(sortedKeys)):
148
+ ### Acquire LIME prediction result
149
+ currentSegmentToHide = sortedKeys[totalSegToHide]
150
+ clonedImg[0,0][segmentations == currentSegmentToHide] = 0.0
151
+ pred, confScore = getPredAndConf(opt, model, scoring, clonedImg, converter, np.array([gt]))
152
+ # To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
153
+ if opt.sensitive and opt.data_filtering_off:
154
+ pred = pred.lower()
155
+ gt = gt.lower()
156
+ alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
157
+ out_of_alphanumeric_case_insensitve = f"[^{alphanumeric_case_insensitve}]"
158
+ pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
159
+ gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
160
+ if pred == gt:
161
+ n_correct.append(1)
162
+ else:
163
+ n_correct.append(0)
164
+ confScore = confScore[0][0]*100
165
+ confidenceList.append(confScore)
166
+ return n_correct, confidenceList
167
+
168
+ ### Once you have the selectivity_eval_results.pkl file,
169
+ def acquire_selectivity_auc(opt, pkl_filename=None):
170
+ if pkl_filename is None:
171
+ pkl_filename = "/home/goo/str/str_vit_dataexplain_lambda/metrics_sensitivity_eval_results_CUTE80.pkl" # VITSTR
172
+ accKeys = []
173
+
174
+ with open(pkl_filename, 'rb') as f:
175
+ selectivity_data = pickle.load(f)
176
+
177
+ for resDictIdx, resDict in enumerate(selectivity_data):
178
+ keylistAcc = []
179
+ keylistConf = []
180
+ metricsKeys = resDict.keys()
181
+ for keyStr in resDict.keys():
182
+ if "_acc" in keyStr: keylistAcc.append(keyStr)
183
+ if "_conf" in keyStr: keylistConf.append(keyStr)
184
+ # Need to check if network correctly predicted the image
185
+ for metrics_accStr in keylistAcc:
186
+ if 1 not in resDict[metrics_accStr]: print("resDictIdx")
187
+
188
+ # Single directory STRExp explanations output demo
189
+ def sampleDemo(opt, modelName):
190
+ targetDataset = "SVTP"
191
+ demoImgDir = "demo_image/"
192
+ outputDir = "demo_image_output/"
193
+
194
+ if not os.path.exists(outputDir):
195
+ os.makedirs(outputDir)
196
+
197
+ segmentation_fn = SegmentationAlgorithm('quickshift', kernel_size=4,
198
+ max_dist=200, ratio=0.2,
199
+ random_seed=random.randint(0, 1000))
200
+
201
+ if modelName=="vitstr":
202
+ if opt.Transformer:
203
+ converter = TokenLabelConverter(opt)
204
+ elif 'CTC' in opt.Prediction:
205
+ converter = CTCLabelConverter(opt.character)
206
+ else:
207
+ converter = AttnLabelConverter(opt.character)
208
+ opt.num_class = len(converter.character)
209
+ if opt.rgb:
210
+ opt.input_channel = 3
211
+ model_obj = Model(opt)
212
+
213
+ model = torch.nn.DataParallel(model_obj).to(device)
214
+ modelCopy = copy.deepcopy(model)
215
+
216
+ """ evaluation """
217
+ scoring_singlechar = STRScore(opt=opt, converter=converter, device=device, enableSingleCharAttrAve=True)
218
+ super_pixel_model_singlechar = torch.nn.Sequential(
219
+ # super_pixler,
220
+ # numpy2torch_converter,
221
+ modelCopy,
222
+ scoring_singlechar
223
+ ).to(device)
224
+ modelCopy.eval()
225
+ scoring_singlechar.eval()
226
+ super_pixel_model_singlechar.eval()
227
+
228
+ # Single Char Attribution Averaging
229
+ # enableSingleCharAttrAve - set to True
230
+ scoring = STRScore(opt=opt, converter=converter, device=device)
231
+ super_pixel_model = torch.nn.Sequential(
232
+ # super_pixler,
233
+ # numpy2torch_converter,
234
+ model,
235
+ scoring
236
+ ).to(device)
237
+ model.eval()
238
+ scoring.eval()
239
+ super_pixel_model.eval()
240
+
241
+ elif modelName=="parseq":
242
+ model = torch.hub.load('baudm/parseq', 'parseq', pretrained=True)
243
+ # checkpoint = torch.hub.load_state_dict_from_url('https://github.com/baudm/parseq/releases/download/v1.0.0/parseq-bb5792a6.pt', map_location="cpu")
244
+ # # state_dict = {key.replace("module.", ""): value for key, value in checkpoint["state_dict"].items()}
245
+ # model.load_state_dict(checkpoint)
246
+ model = model.to(device)
247
+ model_obj = model
248
+ converter = TokenLabelConverter(opt)
249
+ modelCopy = copy.deepcopy(model)
250
+
251
+ """ evaluation """
252
+ scoring_singlechar = STRScore(opt=opt, converter=converter, device=device, enableSingleCharAttrAve=True, model=modelCopy)
253
+ super_pixel_model_singlechar = torch.nn.Sequential(
254
+ # super_pixler,
255
+ # numpy2torch_converter,
256
+ modelCopy,
257
+ scoring_singlechar
258
+ ).to(device)
259
+ modelCopy.eval()
260
+ scoring_singlechar.eval()
261
+ super_pixel_model_singlechar.eval()
262
+
263
+ # Single Char Attribution Averaging
264
+ # enableSingleCharAttrAve - set to True
265
+ scoring = STRScore(opt=opt, converter=converter, device=device, model=model)
266
+ super_pixel_model = torch.nn.Sequential(
267
+ # super_pixler,
268
+ # numpy2torch_converter,
269
+ model,
270
+ scoring
271
+ ).to(device)
272
+ model.eval()
273
+ scoring.eval()
274
+ super_pixel_model.eval()
275
+
276
+
277
+ if opt.blackbg:
278
+ shapImgLs = np.zeros(shape=(1, 1, 224, 224)).astype(np.float32)
279
+ trainList = np.array(shapImgLs)
280
+ background = torch.from_numpy(trainList).to(device)
281
+
282
+ opt.eval = True
283
+ for path, subdirs, files in os.walk(demoImgDir):
284
+ for name in files:
285
+ nameNoExt = name.split('.')[0]
286
+ labels = nameNoExt.split("_")[-1]
287
+ fullfilename = os.path.join(demoImgDir, name) # Value
288
+ pilImg = Image.open(fullfilename)
289
+
290
+ pilImg = pilImg.resize((opt.imgW, opt.imgH))
291
+ # fullfilename: /data/goo/strattr/attributionData/trba/CUTE80/66_featablt.pkl
292
+
293
+ ### Single char averaging
294
+ if modelName == 'vitstr':
295
+
296
+ orig_img_tensors = transforms.ToTensor()(pilImg)
297
+ orig_img_tensors = torch.mean(orig_img_tensors, dim=0).unsqueeze(0).unsqueeze(0)
298
+ image_tensors = ((torch.clone(orig_img_tensors) + 1.0) / 2.0) * 255.0
299
+ imgDataDict = {}
300
+ img_numpy = image_tensors.cpu().detach().numpy()[0] ### Need to set batch size to 1 only
301
+ if img_numpy.shape[0] == 1:
302
+ img_numpy = gray2rgb(img_numpy[0])
303
+ # print("img_numpy shape: ", img_numpy.shape) # (32,100,3)
304
+ segmOutput = segmentation_fn(img_numpy)
305
+ # print("orig_img_tensors shape: ", orig_img_tensors.shape) # (3, 224, 224)
306
+ # print("orig_img_tensors max: ", orig_img_tensors.max()) # 0.6824 (1)
307
+ # print("orig_img_tensors min: ", orig_img_tensors.min()) # 0.0235 (0)
308
+ # sys.exit()
309
+
310
+ results_dict = {}
311
+ aveAttr = []
312
+ aveAttr_charContrib = []
313
+ # segmData, labels = segAndLabels[0]
314
+ target = converter.encode([labels])
315
+
316
+ # labels: RONALDO
317
+ segmDataNP = segmOutput
318
+ segmTensor = torch.from_numpy(segmDataNP).unsqueeze(0).unsqueeze(0)
319
+ # print("segmTensor min: ", segmTensor.min()) # 0 starting segmentation
320
+ segmTensor = segmTensor.to(device)
321
+ # print("segmTensor shape: ", segmTensor.shape)
322
+ # img1 = np.asarray(imgPIL.convert('L'))
323
+ # sys.exit()
324
+ # img1 = img1 / 255.0
325
+ # img1 = torch.from_numpy(img1).unsqueeze(0).unsqueeze(0).type(torch.FloatTensor).to(device)
326
+ img1 = orig_img_tensors.to(device)
327
+ img1.requires_grad = True
328
+ bgImg = torch.zeros(img1.shape).to(device)
329
+ input = img1
330
+ origImgNP = torch.clone(orig_img_tensors).detach().cpu().numpy()[0][0] # (1, 1, 224, 224)
331
+ origImgNP = gray2rgb(origImgNP)
332
+ charOffset = 1
333
+ # preds = model(img1, seqlen=converter.batch_max_length)
334
+
335
+ ### Local explanations only
336
+ collectedAttributions = []
337
+ for charIdx in range(0, len(labels)):
338
+ scoring_singlechar.setSingleCharOutput(charIdx + charOffset)
339
+ gtClassNum = target[0][charIdx + charOffset]
340
+
341
+ ### Shapley Value Sampling
342
+ svs = ShapleyValueSampling(super_pixel_model_singlechar)
343
+ # attr = svs.attribute(input, target=0, n_samples=200) ### Individual pixels, too long to calculate
344
+ attributions = svs.attribute(input, target=gtClassNum, feature_mask=segmTensor)
345
+ collectedAttributions.append(attributions)
346
+ aveAttributions = torch.mean(torch.cat(collectedAttributions,dim=0), dim=0).unsqueeze(0)
347
+ if not torch.isnan(aveAttributions).any():
348
+ rankedAttr = rankedAttributionsBySegm(aveAttributions, segmDataNP)
349
+ rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
350
+ rankedAttr = gray2rgb(rankedAttr)
351
+ mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
352
+ mplotfig.savefig(outputDir + '{}_shapley_l.png'.format(nameNoExt))
353
+ mplotfig.clear()
354
+ plt.close(mplotfig)
355
+
356
+ ### Shapley Value Sampling
357
+ svs = ShapleyValueSampling(super_pixel_model)
358
+ # attr = svs.attribute(input, target=0, n_samples=200) ### Individual pixels, too long to calculate
359
+ attributions = svs.attribute(input, target=0, feature_mask=segmTensor)
360
+ if not torch.isnan(attributions).any():
361
+ collectedAttributions.append(attributions)
362
+ rankedAttr = rankedAttributionsBySegm(attributions, segmDataNP)
363
+ rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
364
+ rankedAttr = gray2rgb(rankedAttr)
365
+ mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
366
+ mplotfig.savefig(outputDir + '{}_shapley.png'.format(nameNoExt))
367
+ mplotfig.clear()
368
+ plt.close(mplotfig)
369
+
370
+ ### Global + Local context
371
+ aveAttributions = torch.mean(torch.cat(collectedAttributions,dim=0), dim=0).unsqueeze(0)
372
+ if not torch.isnan(aveAttributions).any():
373
+ rankedAttr = rankedAttributionsBySegm(aveAttributions, segmDataNP)
374
+ rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
375
+ rankedAttr = gray2rgb(rankedAttr)
376
+ mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
377
+ mplotfig.savefig(outputDir + '{}_shapley_gl.png'.format(nameNoExt))
378
+ mplotfig.clear()
379
+ plt.close(mplotfig)
380
+
381
+ return
382
+
383
+ elif modelName == 'parseq':
384
+ orig_img_tensors = transforms.ToTensor()(pilImg).unsqueeze(0)
385
+ img1 = orig_img_tensors.to(device)
386
+ # image_tensors = ((torch.clone(orig_img_tensors) + 1.0) / 2.0) * 255.0
387
+ image_tensors = torch.mean(orig_img_tensors, dim=1).unsqueeze(0).unsqueeze(0)
388
+ imgDataDict = {}
389
+ img_numpy = image_tensors.cpu().detach().numpy()[0] ### Need to set batch size to 1 only
390
+ if img_numpy.shape[0] == 1:
391
+ img_numpy = gray2rgb(img_numpy[0])
392
+ # print("img_numpy shape: ", img_numpy.shape) # (1, 32, 128, 3)
393
+ segmOutput = segmentation_fn(img_numpy[0])
394
+
395
+ results_dict = {}
396
+ aveAttr = []
397
+ aveAttr_charContrib = []
398
+ target = converter.encode([labels])
399
+
400
+ # labels: RONALDO
401
+ segmDataNP = segmOutput
402
+ img1.requires_grad = True
403
+ bgImg = torch.zeros(img1.shape).to(device)
404
+
405
+ # preds = model(img1, seqlen=converter.batch_max_length)
406
+ input = img1
407
+ origImgNP = torch.clone(orig_img_tensors).detach().cpu().numpy()[0][0] # (1, 1, 224, 224)
408
+ origImgNP = gray2rgb(origImgNP)
409
+ charOffset = 0
410
+ img1 = transforms.Normalize(0.5, 0.5)(img1) # Between -1 to 1
411
+ target = converter.encode([labels])
412
+
413
+ ### Local explanations only
414
+ collectedAttributions = []
415
+ for charIdx in range(0, len(labels)):
416
+ scoring_singlechar.setSingleCharOutput(charIdx + charOffset)
417
+ gtClassNum = target[0][charIdx + charOffset]
418
+
419
+ gs = GradientShap(super_pixel_model_singlechar)
420
+ baseline_dist = torch.zeros((1, 3, opt.imgH, opt.imgW))
421
+ baseline_dist = baseline_dist.to(device)
422
+ attributions = gs.attribute(input, baselines=baseline_dist, target=0)
423
+ collectedAttributions.append(attributions)
424
+ aveAttributions = torch.mean(torch.cat(collectedAttributions,dim=0), dim=0).unsqueeze(0)
425
+ if not torch.isnan(aveAttributions).any():
426
+ rankedAttr = rankedAttributionsBySegm(aveAttributions, segmDataNP)
427
+ rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
428
+ rankedAttr = gray2rgb(rankedAttr)
429
+ mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
430
+ mplotfig.savefig(outputDir + '{}_shapley_l.png'.format(nameNoExt))
431
+ mplotfig.clear()
432
+ plt.close(mplotfig)
433
+
434
+ ### Local Sampling
435
+ gs = GradientShap(super_pixel_model)
436
+ baseline_dist = torch.zeros((1, 3, opt.imgH, opt.imgW))
437
+ baseline_dist = baseline_dist.to(device)
438
+ attributions = gs.attribute(input, baselines=baseline_dist, target=0)
439
+ if not torch.isnan(attributions).any():
440
+ collectedAttributions.append(attributions)
441
+ rankedAttr = rankedAttributionsBySegm(attributions, segmDataNP)
442
+ rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
443
+ rankedAttr = gray2rgb(rankedAttr)
444
+ mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
445
+ mplotfig.savefig(outputDir + '{}_shapley.png'.format(nameNoExt))
446
+ mplotfig.clear()
447
+ plt.close(mplotfig)
448
+
449
+ ### Global + Local context
450
+ aveAttributions = torch.mean(torch.cat(collectedAttributions,dim=0), dim=0).unsqueeze(0)
451
+ if not torch.isnan(aveAttributions).any():
452
+ rankedAttr = rankedAttributionsBySegm(aveAttributions, segmDataNP)
453
+ rankedAttr = rankedAttr.detach().cpu().numpy()[0][0]
454
+ rankedAttr = gray2rgb(rankedAttr)
455
+ mplotfig, _ = visualize_image_attr(rankedAttr, origImgNP, method='blended_heat_map', cmap='RdYlGn')
456
+ mplotfig.savefig(outputDir + '{}_shapley_gl.png'.format(nameNoExt))
457
+ mplotfig.clear()
458
+ plt.close(mplotfig)
459
+
460
+ continue
461
+
462
+ if __name__ == '__main__':
463
+ # deleteInf()
464
+ opt = get_args(is_train=False)
465
+
466
+ """ vocab / character number configuration """
467
+ if opt.sensitive:
468
+ opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
469
+
470
+ cudnn.benchmark = True
471
+ cudnn.deterministic = True
472
+ # opt.num_gpu = torch.cuda.device_count()
473
+
474
+ # combineBestDataXAI(opt)
475
+ # acquire_average_auc(opt)
476
+ # acquireSingleCharAttrAve(opt)
477
+ modelName = "parseq"
478
+ opt.modelName = modelName
479
+ opt.eval_data = "datasets/data_lmdb_release/evaluation"
480
+
481
+ if modelName=="vitstr":
482
+ opt.benchmark_all_eval = True
483
+ opt.Transformation = "None"
484
+ opt.FeatureExtraction = "None"
485
+ opt.SequenceModeling = "None"
486
+ opt.Prediction = "None"
487
+ opt.Transformer = True
488
+ opt.sensitive = True
489
+ opt.imgH = 224
490
+ opt.imgW = 224
491
+ opt.data_filtering_off = True
492
+ opt.TransformerModel= "vitstr_base_patch16_224"
493
+ opt.saved_model = "pretrained/vitstr_base_patch16_224_aug.pth"
494
+ opt.batch_size = 1
495
+ opt.workers = 0
496
+ opt.scorer = "mean"
497
+ opt.blackbg = True
498
+ elif modelName=="parseq":
499
+ opt.benchmark_all_eval = True
500
+ opt.Transformation = "None"
501
+ opt.FeatureExtraction = "None"
502
+ opt.SequenceModeling = "None"
503
+ opt.Prediction = "None"
504
+ opt.Transformer = True
505
+ opt.sensitive = True
506
+ opt.imgH = 32
507
+ opt.imgW = 128
508
+ opt.data_filtering_off = True
509
+ opt.batch_size = 1
510
+ opt.workers = 0
511
+ opt.scorer = "mean"
512
+ opt.blackbg = True
513
+ sampleDemo(opt, modelName)
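The core move in this new script is to turn quickshift superpixels into a Captum `feature_mask`, so that Shapley Value Sampling perturbs whole segments rather than individual pixels. Below is a runnable sketch of that idea; it calls skimage's `quickshift` directly (the script reaches the same algorithm through LIME's `SegmentationAlgorithm` wrapper) and uses a toy linear model in place of `super_pixel_model`, so shapes and names here are assumptions for illustration only.

```python
# Hedged sketch: superpixel segmentation used as a Captum feature mask.
import numpy as np
import torch
import torch.nn as nn
from skimage.segmentation import quickshift
from captum.attr import ShapleyValueSampling

rgb = np.random.rand(32, 128, 3)                       # stand-in for a 32x128 demo crop
segments = quickshift(rgb, kernel_size=4, max_dist=200, ratio=0.2)

# One integer group id per pixel, broadcast across the 3 channels.
segm_tensor = torch.from_numpy(segments).long().unsqueeze(0).unsqueeze(0).expand(1, 3, 32, 128)

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 128, 10)).eval()   # toy scorer
inp = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0).float()

svs = ShapleyValueSampling(model)
attr = svs.attribute(inp, target=0, feature_mask=segm_tensor, n_samples=5)  # few samples, kept small for speed
print(int(segments.max()) + 1, attr.shape)             # number of superpixels, torch.Size([1, 3, 32, 128])
```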
utils.py CHANGED
@@ -296,11 +296,11 @@ def get_device(verbose=True):
      return device


- def get_args(is_train=True):
+ def get_args(is_train=True, model=None):
      parser = argparse.ArgumentParser(description='STR')

      # for test
-     parser.add_argument('--eval_data', required=not is_train, help='path to evaluation dataset')
+     parser.add_argument('--eval_data', help='path to evaluation dataset')
      parser.add_argument('--benchmark_all_eval', action='store_true', help='evaluate 10 benchmark evaluation datasets')
      parser.add_argument('--calculate_infer_time', action='store_true', help='calculate inference timing')
      parser.add_argument('--flops', action='store_true', help='calculates approx flops (may not work)')
@@ -362,11 +362,10 @@ def get_args(is_train=True):

      choices = ["vitstr_tiny_patch16_224", "vitstr_small_patch16_224", "vitstr_base_patch16_224", "vitstr_tiny_distilled_patch16_224", "vitstr_small_distilled_patch16_224"]
      parser.add_argument('--TransformerModel', default=choices[0], help='Which vit/deit transformer model', choices=choices)
-     parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
-     parser.add_argument('--FeatureExtraction', type=str, required=True,
-                         help='FeatureExtraction stage. VGG|RCNN|ResNet')
-     parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
-     parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. None|CTC|Attn')
+     parser.add_argument('--Transformation', type=str, help='Transformation stage. None|TPS')
+     parser.add_argument('--FeatureExtraction', type=str, help='FeatureExtraction stage. VGG|RCNN|ResNet')
+     parser.add_argument('--SequenceModeling', type=str, help='SequenceModeling stage. None|BiLSTM')
+     parser.add_argument('--Prediction', type=str, help='Prediction stage. None|CTC|Attn')
      parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
      parser.add_argument('--input_channel', type=int, default=1,
                          help='the number of input channel of Feature extractor')
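Dropping the `required=True` flags matters because app.py calls `get_args(is_train=False)` from inside the Streamlit process, where there is no command line supplying `--eval_data` or the stage arguments. A hedged usage sketch, assuming this repo's utils.py is importable; the field values are the ones app.py sets above:

```python
# Sketch: building opt programmatically now that no CLI flags are mandatory.
import sys
from utils import get_args

sys.argv = [sys.argv[0]]          # simulate an empty command line (as under Streamlit)
opt = get_args(is_train=False)    # no longer aborts on the missing --eval_data / stage flags

# Fill in the fields in code, the way app.py does.
opt.modelName = "parseq"
opt.Transformer = True
opt.sensitive = True
opt.imgH, opt.imgW = 32, 128
opt.batch_size, opt.workers = 1, 0
```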