import pathlib
import torch
import torch.hub
from torchvision.transforms.functional import convert_image_dtype
from torchvision.io.image import ImageReadMode, encode_png, decode_image

from mcquic import Config
from mcquic.modules.compressor import BaseCompressor, Compressor
from mcquic.datasets.transforms import AlignedCrop
from mcquic.utils.specification import File
from mcquic.utils.vision import DeTransform

try:
    import streamlit as st
except ImportError:
    raise ImportError("To run `mcquic service`, please install Streamlit first: `pip install streamlit`.")


MODELS_URL = "https://github.com/xiaosu-zhu/McQuic/releases/download/generic/qp_3_msssim_fcc58b73.mcquic"


@st.experimental_singleton
def loadModel(qp: int, local: pathlib.Path, device, mse: bool):
    """Download, cache, and build the pretrained compressor on `device`."""
    # `st.experimental_singleton` keeps a single model instance alive across reruns and sessions.
    ckpt = torch.hub.load_state_dict_from_url(MODELS_URL, map_location=device, check_hash=True)

    config = Config.deserialize(ckpt["config"])
    model = Compressor(**config.Model.Params).to(device)
    # Record which checkpoint is in use: a local path if given, otherwise the quality parameter `qp`.
    model.QuantizationParameter = str(local) if local is not None else str(qp)
    model.load_state_dict(ckpt["model"])
    return model



@st.cache
def compressImage(image: torch.Tensor, model: BaseCompressor, crop: bool) -> File:
    """Compress a `[c, h, w]` uint8 image tensor into a serializable `File`."""
    # uint8 [0, 255] -> float [0, 1].
    image = convert_image_dtype(image)

    if crop:
        # Crop so the spatial size aligns with the model's feature-map grid.
        image = AlignedCrop()(image)

    # [c, h, w], rescaled to [-1, 1].
    image = (image - 0.5) * 2

    with model._quantizer.readyForCoding() as cdfs:
        # Add a batch dimension and entropy-code with the quantizer's CDF tables.
        codes, binaries, headers = model.compress(image[None, ...], cdfs)

    # Wrap the header and binary stream of the single image into a `File`.
    return File(headers[0], binaries[0])


@st.cache
def decompressImage(sourceFile: File, model: BaseCompressor) -> torch.ByteTensor:
    """Restore a compressed `File` back to a `[c, h, w]` uint8 image tensor."""
    binaries = sourceFile.Content

    with model._quantizer.readyForCoding() as cdfs:
        # [1, c, h, w]
        restored = model.decompress([binaries], cdfs, [sourceFile.FileHeader])

    # [c, h, w], mapped back to uint8 by DeTransform.
    return DeTransform()(restored[0])
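
# Round-trip sketch (illustrative only; assumes a decoded `[c, h, w]` uint8 `image`
# and a loaded `model`): the two cached helpers above mirror each other through the
# same `File` container that backs the `.mcq` downloads and uploads in `main()`.
#
#   compressed = compressImage(image, model, crop=False)
#   payload = compressed.serialize()                                # bytes of a `.mcq` file
#   restored = decompressImage(File.deserialize(payload), model)    # uint8 [c, h, w]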



def main(debug: bool, quiet: bool, disable_gpu: bool):
    # Fall back to CPU when CUDA is unavailable or explicitly disabled.
    if disable_gpu or not torch.cuda.is_available():
        device = torch.device("cpu")
    else:
        device = torch.device("cuda")

    # Only the `qp = 3` model is served (see the note in the sidebar below).
    model = loadModel(3, None, device, False).eval()

    st.sidebar.markdown("""
<p align="center">
  <a href="https://github.com/xiaosu-zhu/McQuic">
    <img src="https://raw.githubusercontent.com/xiaosu-zhu/McQuic/main/assets/McQuic-light.svg" alt="McQuic" title="McQuic" width="45%"/>
  </a>
  <br/>
  <span>
    <i>a.k.a.</i> <b><i>M</i></b>ulti-<b><i>c</i></b>odebook <b><i>Qu</i></b>antizers for neural <b><i>i</i></b>mage <b><i>c</i></b>ompression
  </span>
</p>

<p align="center">
  Compressing images on-the-fly.
</p>


<a href="#">
  <img src="https://img.shields.io/badge/NOTE-yellow?style=for-the-badge" alt="NOTE"/>
</a>

> Due to resource limitations, I only provide the compression service with the `qp = 3` model.


<a href="#">
  <img src="https://img.shields.io/github/stars/xiaosu-zhu/McQuic?style=social" alt="GitHub"/>
</a>

""", unsafe_allow_html=True)


    with st.form("SubmitForm"):
        uploadedFile = st.file_uploader("Try McQuic: compress or restore an image!", type=["png", "jpg", "jpeg", "mcq"], help="Upload your image or a compressed `.mcq` file here.")
        cropping = st.checkbox("Crop the image to align with feature-map grids.", help="If checked, the image is cropped so its size aligns with the feature-map grid, which makes the output smaller.")
        submitted = st.form_submit_button("Submit", help="Click to start compression/restoration.")
    if submitted and uploadedFile is not None:
        if uploadedFile.name.endswith(".mcq"):
            # A compressed `.mcq` file: parse it and restore the image.
            uploadedFile.flush()

            binaryFile = File.deserialize(uploadedFile.read())

            st.text(str(binaryFile))

            result = decompressImage(binaryFile, model)
            st.image(result.cpu().permute(1, 2, 0).numpy())
            st.download_button("Click to download the restored image", data=encode_png(result.cpu()).numpy().tobytes(), file_name=".".join(uploadedFile.name.split(".")[:-1] + ["png"]), mime="image/png")
        else:
            # A plain image: decode it, show a preview, then compress it to a `.mcq` file.
            raw = torch.ByteTensor(torch.ByteStorage.from_buffer(uploadedFile.read()))  # type: ignore
            image = decode_image(raw, ImageReadMode.RGB).to(device)
            st.image(image.cpu().permute(1, 2, 0).numpy())
            result = compressImage(image, model, cropping)

            st.text(str(result))

            st.download_button("Click to download the compressed file", data=result.serialize(), file_name=".".join(uploadedFile.name.split(".")[:-1] + ["mcq"]), mime="image/mcq")


if __name__ == "__main__":
    with torch.inference_mode():
        main(False, False, False)
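
# Usage sketch (an assumption, not part of the repository): the ImportError message above
# implies this app is normally launched through the `mcquic service` CLI entry point,
# which wraps Streamlit. Running it directly should be roughly equivalent to:
#
#   pip install streamlit
#   streamlit run path/to/this/file.py
#
# Streamlit then serves the form built in `main()`: uploading a `.png`/`.jpg`/`.jpeg`
# yields a downloadable `.mcq` file, and uploading a `.mcq` restores the image as PNG.
# The whole app runs under `torch.inference_mode()`, so no autograd state is tracked.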