import json
import os

import gradio as gr
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

from KMVE_RG.models.SGF_model import SGF
from KMVE_RG.modules.tokenizers import Tokenizer
from KMVE_RG.modules.metrics import compute_scores
from utils.thyroid_gen_config import config as thyroid_args
from utils.liver_gen_config import config as liver_args
from utils.breast_gen_config import config as breast_args

# Fix random seeds so report generation is reproducible across runs.
np.random.seed(9233)
torch.manual_seed(9233)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False


class Generator(object):
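    """Wraps an organ-specific SGF report-generation model for the Gradio demo.

    Loads the matching config, tokenizer, trained checkpoint, image transform and
    annotation JSON, and exposes helpers to preview a case's two ultrasound views
    and to generate a report for it.
    """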
    def __init__(self, model_type):
        # Pick the organ-specific config (checkpoint path, annotation file, image dir, ...).
        if model_type == 'Thyroid':
            self.args = thyroid_args
        elif model_type == 'Breast':
            self.args = breast_args
        elif model_type == 'Liver':
            self.args = liver_args
        else:
            raise ValueError(f'Unsupported model type: {model_type}')
        # Build the tokenizer and SGF model, then load the trained checkpoint weights.
        self.tokenizer = Tokenizer(self.args)
        self.model = SGF(self.args, self.tokenizer)
        sd = torch.load(self.args.models)['state_dict']
        msg = self.model.load_state_dict(sd)
        print(msg)
        self.model.eval()
        self.metrics = compute_scores
        # Inference-time preprocessing: resize to 224x224 and normalize with ImageNet statistics.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225))])
        with open(self.args.ann_path, 'r', encoding='utf-8-sig') as f:
            self.data = json.load(f)
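        # self.data is expected to map each uid to a dict with 'img_paths' (two image
        # file names under image_dir) and 'report' (the reference report text).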
        print('Model loaded')

    def image_process(self, img_paths):
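        """Load the case's two image views, apply the eval transform, and stack them into a (2, 3, 224, 224) tensor."""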
        image_1 = Image.open(os.path.join(self.args.image_dir, img_paths[0])).convert('RGB')
        image_2 = Image.open(os.path.join(self.args.image_dir, img_paths[1])).convert('RGB')
        if self.transform is not None:
            image_1 = self.transform(image_1)
            image_2 = self.transform(image_2)
        image = torch.stack((image_1, image_2), 0)
        return image

    def generate(self, uid):
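        """Generate a report for the given uid and return (predicted report, ground-truth report, NLP metric scores)."""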
        img_paths, report = self.data[uid]['img_paths'], self.data[uid]['report']
        imgs = self.image_process(img_paths)
        imgs = imgs.unsqueeze(0)  # add a batch dimension: (1, 2, 3, 224, 224)
        with torch.no_grad():
            output, _ = self.model(imgs, mode='sample')
        pred = self.tokenizer.decode(output[0].cpu().numpy())
        # Tokenize the reference report (truncated to max_seq_length), drop the leading
        # id produced by the tokenizer, and decode it back to text for scoring.
        gt = self.tokenizer.decode(self.tokenizer(report[:self.args.max_seq_length])[1:])
        scores = self.metrics({0: [gt]}, {0: [pred]})
        return pred, gt, scores

    def visualize_images(self, uid):
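        """Return the two raw (untransformed) ultrasound images of the given uid for display in the UI."""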
        image_1 = Image.open(os.path.join(self.args.image_dir, self.data[uid]['img_paths'][0])).convert('RGB')
        image_2 = Image.open(os.path.join(self.args.image_dir, self.data[uid]['img_paths'][1])).convert('RGB')
        return image_1, image_2


def demo():
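    """Build the Gradio Blocks app: pick an organ model, pick a case uid, preview its images, and generate a report."""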
    with gr.Blocks() as app:
        gr.Markdown("# Ultrasound Report Generation Demo")
        gr.Markdown('### SIAT认知与交互技术中心')
        gr.Markdown('### Project page: https://lijunrio.github.io/Ultrasound-Report-Generation/')

        with gr.Row():
            model_choice = gr.Radio(choices=["Thyroid", "Breast", "Liver"], label="Select a model type", interactive=True)
        # Holds the Generator instance for the currently selected organ model.
        model = gr.State()

        uids = [f"uid_{i}" for i in range(20)]
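        # NOTE: assumes every organ's annotation JSON contains entries keyed
        # 'uid_0' ... 'uid_19'; adjust this list if your annotation file differs.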
        with gr.Row():
            # Disabled until a model has been loaded.
            uid_choice = gr.Radio(choices=uids, label="Select a uid", interactive=False)

        with gr.Row():
            image1_display = gr.Image(label="Image 1", visible=True)
            image2_display = gr.Image(label="Image 2", visible=True)

        generate_button = gr.Button("Generate Report", interactive=False)
        generated_report_display = gr.Textbox(label="Generated Report", visible=True)
        ground_truth_display = gr.Textbox(label="Ground Truth Report", visible=True)
        nlp_score_display = gr.Textbox(label="NLP Scores", visible=True)
        def load_model_and_uids(model_type):
            # Instantiate the organ-specific Generator and enable the uid selector.
            model = Generator(model_type)
            return model, gr.update(interactive=True)

        def on_uid_click(model, uid):
            # Show the two views of the selected case and enable the generate button.
            image1, image2 = model.visualize_images(uid)
            return image1, image2, gr.update(interactive=True)

        def on_generate_click(model, uid):
            # Run the model on the selected case and report the NLG metric scores.
            generated_report, ground_truth_report, nlp_score = model.generate(uid)
            return generated_report, ground_truth_report, f"NLP scores: {nlp_score}"
        # Wire up the interaction flow: choose a model -> choose a uid -> generate a report.
        model_choice.change(load_model_and_uids, inputs=model_choice, outputs=[model, uid_choice])
        uid_choice.change(on_uid_click, inputs=[model, uid_choice], outputs=[image1_display, image2_display, generate_button])
        generate_button.click(on_generate_click, inputs=[model, uid_choice], outputs=[generated_report_display, ground_truth_display, nlp_score_display])

    return app


if __name__ == '__main__':
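    # launch() serves the demo locally; pass launch(share=True) to also expose a temporary public link.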
    demo().launch()