Spaces:
Sleeping
Sleeping
franzi2505
committed on
Commit
·
bada1b4
1
Parent(s):
28b135a
add feature to calculate pq-score per area
Browse files- PanopticQuality.py +12 -10
PanopticQuality.py
CHANGED
@@ -12,7 +12,7 @@
|
|
12 |
# See the License for the specific language governing permissions and
|
13 |
# limitations under the License.
|
14 |
"""TODO: Add a description here."""
|
15 |
-
from typing import Dict, List
|
16 |
|
17 |
import evaluate
|
18 |
import datasets
|
@@ -82,6 +82,7 @@ class PQMetric(evaluate.Metric):
|
|
82 |
stuff: List[str] = None,
|
83 |
per_class: bool = True,
|
84 |
split_sq_rq: bool = True,
|
|
|
85 |
**kwargs
|
86 |
):
|
87 |
super().__init__(**kwargs)
|
@@ -117,7 +118,8 @@ class PQMetric(evaluate.Metric):
|
|
117 |
things=set([self.label2id[label] for label in self.label2id.keys() if label not in self.stuff]),
|
118 |
stuffs=set([self.label2id[label] for label in self.label2id.keys() if label in self.stuff]),
|
119 |
return_per_class=per_class,
|
120 |
-
return_sq_and_rq=split_sq_rq
|
|
|
121 |
)
|
122 |
self.things_stuffs = sorted(self.pq_metric.things) + sorted(self.pq_metric.stuffs)
|
123 |
|
@@ -174,22 +176,22 @@ class PQMetric(evaluate.Metric):
|
|
174 |
iou = self.pq_metric.metric.iou_sum.clone()
|
175 |
|
176 |
# compute scores
|
177 |
-
result = self.pq_metric.compute() # shape : (n_classes (sorted things + sorted stuffs), scores (pq, sq, rq))
|
178 |
|
179 |
result_dict = dict()
|
180 |
|
181 |
if self.per_class:
|
182 |
if not self.split_sq_rq:
|
183 |
-
result = result.
|
184 |
-
result_dict["scores"] = {self.id2label[numeric_label]: result[i].tolist() \
|
185 |
for i, numeric_label in enumerate(self.things_stuffs)}
|
186 |
-
result_dict["scores"].update({"ALL": result.mean(
|
187 |
-
result_dict["numbers"] = {self.id2label[numeric_label]: [tp[i].
|
188 |
for i, numeric_label in enumerate(self.things_stuffs)}
|
189 |
-
result_dict["numbers"].update({"ALL": [tp.sum().
|
190 |
else:
|
191 |
-
result_dict["scores"] = {"ALL": result.tolist() if self.split_sq_rq else [result.tolist()]}
|
192 |
-
result_dict["numbers"] = {"ALL": [tp.sum().
|
193 |
|
194 |
return result_dict
|
195 |
|
|
|
12 |
# See the License for the specific language governing permissions and
|
13 |
# limitations under the License.
|
14 |
"""TODO: Add a description here."""
|
15 |
+
from typing import Dict, List, Tuple
|
16 |
|
17 |
import evaluate
|
18 |
import datasets
|
|
|
82 |
stuff: List[str] = None,
|
83 |
per_class: bool = True,
|
84 |
split_sq_rq: bool = True,
|
85 |
+
area_rng: List[Tuple[float]] = [(0, 1e8)],
|
86 |
**kwargs
|
87 |
):
|
88 |
super().__init__(**kwargs)
|
|
|
118 |
things=set([self.label2id[label] for label in self.label2id.keys() if label not in self.stuff]),
|
119 |
stuffs=set([self.label2id[label] for label in self.label2id.keys() if label in self.stuff]),
|
120 |
return_per_class=per_class,
|
121 |
+
return_sq_and_rq=split_sq_rq,
|
122 |
+
areas=area_rng
|
123 |
)
|
124 |
self.things_stuffs = sorted(self.pq_metric.things) + sorted(self.pq_metric.stuffs)
|
125 |
|
|
|
176 |
iou = self.pq_metric.metric.iou_sum.clone()
|
177 |
|
178 |
# compute scores
|
179 |
+
result = self.pq_metric.compute() # shape : (area_rngs, n_classes (sorted things + sorted stuffs), scores (pq, sq, rq))
|
180 |
|
181 |
result_dict = dict()
|
182 |
|
183 |
if self.per_class:
|
184 |
if not self.split_sq_rq:
|
185 |
+
result = result.unsqueeze(0)
|
186 |
+
result_dict["scores"] = {self.id2label[numeric_label]: result[:,:, i].tolist() \
|
187 |
for i, numeric_label in enumerate(self.things_stuffs)}
|
188 |
+
result_dict["scores"].update({"ALL": result.mean(dim=-1).tolist()})
|
189 |
+
result_dict["numbers"] = {self.id2label[numeric_label]: [tp[:, i].tolist(), fp[:, i].tolist(), fn[:, i].tolist(), iou[:, i].tolist()] \
|
190 |
for i, numeric_label in enumerate(self.things_stuffs)}
|
191 |
+
result_dict["numbers"].update({"ALL": [tp.sum(dim=1).tolist(), fp.sum(dim=1).tolist(), fn.sum(dim=1).tolist(), iou.sum(dim=1).tolist()]})
|
192 |
else:
|
193 |
+
result_dict["scores"] = {"ALL": result.tolist() if self.split_sq_rq else ([result.tolist()] if len(self.pq_metric.get_areas())>1 else [[result.tolist()]])}
|
194 |
+
result_dict["numbers"] = {"ALL": [tp.sum(dim=-1).tolist(), fp.sum(dim=-1).tolist(), fn.sum(dim=-1).tolist(), iou.sum(dim=-1).tolist()]}
|
195 |
|
196 |
return result_dict
|
197 |
|