vre_data_analysis script and nb
scripts/dronescapes_viewer.ipynb
CHANGED
The diff for this file is too large to render.
scripts/vre_data_analysis.ipynb
ADDED
The diff for this file is too large to render.
scripts/vre_data_analysis.py
ADDED
@@ -0,0 +1,100 @@
import sys
from vre.readers import MultiTaskDataset
from vre.representations import build_representations_from_cfg, Representation
from vre.representations.cv_representations import SemanticRepresentation
from vre.logger import vre_logger as logger
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import io
import base64
import bs4
from PIL import Image

def extract_pil_from_b64_image(base64_buf: str) -> Image.Image:
    """Decode a base64-encoded PNG back into a PIL image."""
    return Image.open(io.BytesIO(base64.b64decode(base64_buf)))

def extract_b64_image_from_fig(fig: plt.Figure) -> str:
    """Render a matplotlib figure to PNG and return it as a base64 string."""
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png", dpi=fig.dpi)
    buffer.seek(0)
    base64_buf = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return base64_buf

def extract_b64_imgsrc_from_fig(fig: plt.Figure) -> str:
    """Wrap the base64-encoded figure in an inline <img> tag for embedding in HTML."""
    base64_buf = extract_b64_image_from_fig(fig)
    return f"""<img src="data:image/png;base64,{base64_buf}" alt="Sample Plot">"""

def save_html(html_imgs: list[str], description: str, out_path: str):
    """Write a standalone HTML report: the dataset description followed by all plots."""
    html = bs4.BeautifulSoup("""
<!DOCTYPE html>
<html>
<head>
<title>VRE Dataset Analysis</title>
</head>
<body>
<h1 id="description">Description</h1>
<h1 id="plots">Plots</h1>
</body>
</html>""", features="lxml")
    html.find(id="description").insert_after(bs4.BeautifulSoup(description.replace("\n", "<br/>"), features="lxml"))
    for html_img in html_imgs[::-1]:
        html.find(id="plots").insert_after(bs4.BeautifulSoup(html_img, features="lxml"))
    with open(out_path, "w") as fp:
        fp.write(str(html))
    print(f"Written html at '{out_path}'")

def histogram_from_classification_task(reader: MultiTaskDataset, classif: SemanticRepresentation,
                                       n: int | None = None, mode: str = "sequential", **figkwargs) -> plt.Figure:
    """Accumulate per-class counts over (up to n) dataset items and plot them as normalized horizontal bars."""
    fig = plt.Figure(**figkwargs)
    counts = np.zeros(len(classif.classes), dtype=np.uint64)
    ixs = np.arange(len(reader)) if mode == "sequential" else np.random.permutation(len(reader))
    ixs = ixs[0:n] if n is not None and n < len(reader) else ixs
    assert getattr(classif, "load_mode", "binary") == "binary", classif.load_mode
    for i in ixs:
        item = reader.get_one_item(i.item(), subset_tasks=[classif.name])
        data_cnts = item[0][classif.name].unique(return_counts=True)
        item_classes, item_counts = data_cnts[0].numpy().astype(int), data_cnts[1].numpy().astype(int)
        counts[item_classes] = counts[item_classes] + item_counts

    df = pd.DataFrame({"Labels": classif.classes, "Values": counts})
    df["Values"] = df["Values"] / df["Values"].sum()
    df = df.sort_values("Values", ascending=True)
    df = df[df["Values"] > 0.01]  # drop classes below 1% to keep the plot readable
    df.plot(x="Labels", y="Values", kind="barh", legend=False, color="skyblue", ax=fig.gca(), title=classif.name)
    fig.gca().set_xlim(0, 1)
    # fig.gca().set_ylabel("Values")
    fig.tight_layout()
    plt.close()
    return fig

def gaussian_from_statistics(reader: MultiTaskDataset, regression_task: Representation) -> plt.Figure:
    """Plot a per-channel gaussian from the reader's precomputed mean/std statistics of a regression task."""
    _, __, mean, std = [x.numpy() for x in reader.statistics[regression_task.name]]
    fig, ax = plt.subplots(1, n_ch := mean.shape[0], figsize=(10, 5))
    ax = [ax] if n_ch == 1 else ax
    x = np.linspace(mean - 4 * std, mean + 4 * std, 1000)  # shape: (1000, n_ch)
    y = (1 / (std * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((x - mean) / std) ** 2)
    for i in range(n_ch):
        ax[i].plot(x[:, i], y[:, i])
    fig.suptitle(regression_task.name)
    return fig

if __name__ == "__main__":
    data_path = sys.argv[1]
    cfg_path = sys.argv[2]
    representations = build_representations_from_cfg(cfg_path)
    print(representations)
    reader = MultiTaskDataset(data_path, task_names=list(representations),
                              task_types=representations, normalization="min_max")
    print(reader)

    imgsrcs = []
    # one class-frequency histogram per classification task
    for classif_task in reader.classification_tasks:
        fig = histogram_from_classification_task(reader, classif_task)
        imgsrcs.append(extract_b64_imgsrc_from_fig(fig))

    # one per-channel gaussian per remaining (regression) task
    regression_tasks = [t for t in reader.tasks if t not in reader.classification_tasks]
    for regression_task in regression_tasks:
        fig = gaussian_from_statistics(reader, regression_task)
        imgsrcs.append(extract_b64_imgsrc_from_fig(fig))

    save_html(imgsrcs, str(reader), "plot.html")
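
For reference, the report generation hinges on the base64 round-trip performed by extract_b64_image_from_fig and extract_pil_from_b64_image. A minimal standalone sketch of that round-trip (not part of the diff; it assumes only matplotlib and Pillow, and the 2x2-inch figure is an arbitrary example):

import io
import base64
import matplotlib.pyplot as plt
from PIL import Image

# build a small throwaway figure, mirroring how the script creates unmanaged plt.Figure objects
fig = plt.Figure(figsize=(2, 2))
fig.gca().plot([0, 1], [0, 1])

# encode: figure -> PNG bytes -> base64 string (same steps as extract_b64_image_from_fig)
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=fig.dpi)
b64 = base64.b64encode(buf.getvalue()).decode("utf-8")

# decode: base64 string -> PIL image (same steps as extract_pil_from_b64_image)
img = Image.open(io.BytesIO(base64.b64decode(b64)))
print(img.size)  # pixel size = figsize * dpi, e.g. (200, 200) at the default 100 dpi

Based on the sys.argv handling in the __main__ block, the script is presumably invoked as `python scripts/vre_data_analysis.py <data_path> <cfg_path>` and writes its report to plot.html in the current directory.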