edgilr committed on
Commit c58ca64 (verified)
1 Parent(s): acffed1

Update app.py

Files changed (1): app.py (+41, -18)
app.py CHANGED
@@ -1,24 +1,47 @@
-from huggingface_hub import from_pretrained_fastai
+from gradio.outputs import Label
+from icevision.all import *
+from icevision.models.checkpoint import *
+import PIL
 import gradio as gr
-from fastai.vision.all import *
 import os
-try:
-    import toml
-except ImportError:
-    os.system('pip install toml')
-    import toml
 
-repo_id = "edgilr/ChestXRays"
+# Load model
+checkpoint_path = "edgilr/model_checkpoint.pth"
+checkpoint_and_model = model_from_checkpoint(checkpoint_path)
+model = checkpoint_and_model["model"]
+model_type = checkpoint_and_model["model_type"]
+class_map = checkpoint_and_model["class_map"]
 
-learner = from_pretrained_fastai(repo_id)
-labels = learner.dls.vocab
+# Transforms
+img_size = checkpoint_and_model["img_size"]
+valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(img_size), tfms.A.Normalize()])
 
-# Define a function that carries out the predictions
-def predict(img):
-    #img = PILImage.create(img)
-    pred, pred_idx, probs = learner.predict(img)
-    return {labels[i]: float(probs[i]) for i in range(len(labels))}
-
-# Create the interface and launch it.
-gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(128, 128)), outputs=gr.outputs.Label(num_top_classes=3), examples=['normal.jpeg', 'pneumonia.jpeg']).launch(share=False)
+# Populate examples in Gradio interface
+examples = [
+    ['1.jpg'],
+    ['2.jpg'],
+    ['3.jpg']
+]
+
+def show_preds(input_image):
+    img = PIL.Image.fromarray(input_image, "RGB")
+    pred_dict = model_type.end2end_detect(img, valid_tfms, model,
+                                          class_map=class_map,
+                                          detection_threshold=0.5,
+                                          display_label=False,
+                                          display_bbox=True,
+                                          return_img=True,
+                                          font_size=16,
+                                          label_color="#FF59D6")
+    return pred_dict["img"]
+
+gr_interface = gr.Interface(
+    fn=show_preds,
+    inputs=["image"],
+    outputs=[gr.outputs.Image(type="pil", label="RetinaNet Inference")],
+    title="Fridge Object Detector",
+    description="A VFNet model that detects common objects found in a fridge. Upload an image or click an example image below to use.",
+    examples=examples,
+)
+gr_interface.launch(inline=False, share=False, debug=True)
 
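
For quick local testing outside Gradio, a minimal sketch of the same inference path introduced by this commit; the example image `1.jpg` comes from the examples list above, while the output filename `prediction.jpg` is only illustrative:

from icevision.all import *
from icevision.models.checkpoint import *
import PIL

# Load the checkpoint the same way app.py does and unpack the pieces it uses.
ckpt = model_from_checkpoint("edgilr/model_checkpoint.pth")
model, model_type, class_map = ckpt["model"], ckpt["model_type"], ckpt["class_map"]

# Same validation transforms as the app: resize/pad to the stored image size, then normalize.
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(ckpt["img_size"]), tfms.A.Normalize()])

# Run detection on one of the example images; with return_img=True the result dict carries
# an annotated PIL image (the same value the app hands to its Gradio output).
img = PIL.Image.open("1.jpg").convert("RGB")
pred_dict = model_type.end2end_detect(img, valid_tfms, model,
                                      class_map=class_map,
                                      detection_threshold=0.5,
                                      return_img=True)
pred_dict["img"].save("prediction.jpg")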