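"""Gradio demo for CELL-E_2 sequence prediction.

Given a protein sequence containing <mask> tokens and a nucleus image with a
sketched localization mask, the app returns the predicted sequence together
with the processed nucleus crop and the thresholded mask.
"""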
import gradio as gr
from prediction import run_sequence_prediction
import torch
import torchvision.transforms as T
from celle.utils import process_image
from PIL import Image

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def gradio_demo(model_name, sequence_input, image):
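    """Predict the masked residues of `sequence_input` from the drawn localization.

    `image` is the dict produced by the Gradio sketch tool: the uploaded
    nucleus image under "image" and the drawn localization under "mask".
    Returns the thresholded mask, the processed nucleus crop, and the
    predicted sequence.
    """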
    model = f"CELL-E_2-Image_Prediction/models/{model_name}.ckpt"
    config = f"CELL-E_2-Image_Prediction/models/{model_name}.yaml"

    if "Finetuned" in model_name:
        dataset = "OpenCell"

    else:
        dataset = "HPA"
        
        
    # The sketch component returns the uploaded nucleus image and the drawn
    # localization mask as separate grayscale layers.
    nucleus_image = image['image'].convert('L')
    protein_image = image['mask'].convert('L')

    # Stack both channels and apply the dataset-specific preprocessing
    # (normalization and, for larger images, a random 256 x 256 crop).
    to_tensor = T.ToTensor()
    nucleus_tensor = to_tensor(nucleus_image)
    protein_tensor = to_tensor(protein_image)
    stacked_images = torch.stack([nucleus_tensor, protein_tensor], dim=0)
    processed_images = process_image(stacked_images, dataset)

    # Add a batch dimension and binarize the drawn localization mask.
    nucleus_image = processed_images[0].unsqueeze(0)
    protein_image = processed_images[1].unsqueeze(0)
    protein_image = 1.0 * (protein_image > 0)
    
    # Debug output: number of pixels marked in the localization sketch.
    print(f"Sketched pixel count: {protein_image.sum()}")

    # Run the sequence prediction for the masked positions on the chosen device.
    formatted_predicted_sequence = run_sequence_prediction(
        sequence_input=sequence_input,
        nucleus_image=nucleus_image,
        protein_image=protein_image,
        model_ckpt_path=model,
        model_config_path=config,
        device=device,
    )
    
    return T.ToPILImage()(protein_image), T.ToPILImage()(nucleus_image), formatted_predicted_sequence


# Build the Gradio interface.
with gr.Blocks(theme='gradio/soft') as demo:
    gr.Markdown("Select the prediction model.")
    gr.Markdown(
        "- CELL-E_2_HPA_2560 is a good general-purpose model for various cell types imaged with ICC-IF."
    )
    gr.Markdown(
        "- CELL-E_2_OpenCell_2560 is trained on OpenCell and is better suited for live-cell predictions on HEK cells."
    )
    with gr.Row():
        model_name = gr.Dropdown(
            ["CELL-E_2_HPA_2560", "CELL-E_2_OpenCell_2560"],
            value="CELL-E_2_HPA_2560",
            label="Model Name",
        )
    with gr.Row():
        gr.Markdown(
            "Input the desired amino acid sequence. GFP is shown below by default. The sequence must include ```<mask>``` for a prediction to be run."
        )

    with gr.Row():
        sequence_input = gr.Textbox(
            value="M<mask><mask><mask><mask><mask>SKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKFICTTGKLPVPWPTLVTTFSYGVQCFSRYPDHMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYIMADKQKNGIKVNFKIRHNIEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSTQSALSKDPNEKRDHMVLLEFVTAAGITHGMDELYK",
            label="Sequence",
        )
    with gr.Row():
        gr.Markdown(
            "Uploading a nucleus image is necessary. A random crop of 256 x 256 will be applied if larger. We provide default images in [images](https://huggingface.co/spaces/HuangLab/CELL-E_2/tree/main/images). Draw the desired localization on top of the nucelus image."
        )

    with gr.Row().style(equal_height=True):
        nucleus_image = gr.Image(
            source="upload", 
            tool="sketch",
            invert_colors=True,
            label="Nucleus Image", 
            interactive=True,
            image_mode="L",
            type="pil"
        )


    with gr.Row().style(equal_height=True):
        nucleus_crop = gr.Image(
            label="Nucleus Image (Crop)", 
            image_mode="L",
            type="pil"
        )

        mask = gr.Image(
            label="Threshold Image", 
            image_mode="L",
            type="pil"
        )
    with gr.Row():
        gr.Markdown("Sequence predictions are show below.")

    with gr.Row().style(equal_height=True):
        predicted_sequence = gr.Textbox(label='Predicted Sequence')


    with gr.Row():
        button = gr.Button("Run Model")

        inputs = [model_name, sequence_input, nucleus_image]

        outputs = [mask, nucleus_crop, predicted_sequence]

        button.click(gradio_demo, inputs, outputs)

demo.launch(enable_queue=True)
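
# Illustrative usage outside the UI (hypothetical paths; the sketch tool
# normally supplies the {"image": ..., "mask": ...} dict):
#
#   from PIL import Image
#   nucleus = Image.open("images/nucleus.png")   # hypothetical nucleus image
#   sketch = Image.open("images/sketch.png")     # hypothetical drawn localization
#   mask, crop, sequence = gradio_demo(
#       "CELL-E_2_HPA_2560",
#       "M<mask><mask>...K",                     # sequence must contain <mask>
#       {"image": nucleus, "mask": sketch},
#   )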