igmarco committed · Commit f172b96 · 1 Parent(s): a2e440e

Update app.py

Files changed (1)
  1. app.py +36 -15
app.py CHANGED
@@ -1,20 +1,41 @@
  from huggingface_hub import from_pretrained_fastai
  import gradio as gr
- # from fastai.vision.all import *
- from fastai.text.all import *
-
- # repo_id = "YOUR_USERNAME/YOUR_LEARNER_NAME"
- repo_id = "igmarco/LSTM-text-classification"
-
- learner = from_pretrained_fastai(repo_id)
- labels = ['hate speech', 'offensive language', 'neither']
-
- # Define a function that performs the predictions
- def predict(txt):
-     pred,pred_idx,probs = learner.predict(txt)
-     return {labels[i]: float(probs[i]) for i in range(len(labels))}
-
- # Create the interface and launch it.
- gr.Interface(fn=predict, inputs=gr.Textbox(lines=2, placeholder="Text Here..."), outputs=gr.outputs.Label(num_top_classes=3)).launch(share=False)
+ from fastai.vision.all import *
+ from icevision.all import *
+
+ # repo_id = "YOUR_USERNAME/YOUR_LEARNER_NAME"
+ repo_id = "igmarco/raccoon-image-detection"
+
+ model1 = from_pretrained_fastai(repo_id)
+
+ def show_preds(input_image, display_label, display_bbox, detection_threshold):
+
+     if detection_threshold==0: detection_threshold=0.5
+
+     img = PIL.Image.fromarray(input_image, 'RGB')
+
+     pred_dict = models.torchvision.faster_rcnn.end2end_detect(img, valid_tfms, model1, class_map=class_map, detection_threshold=detection_threshold,
+                                                                display_label=display_label, display_bbox=display_bbox, return_img=True,
+                                                                font_size=16, label_color="#FF59D6")
+
+     return pred_dict['img']
+
+ # display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display", default=True)
+ display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
+ display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)
+
+ detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")
+
+ outputs = gr.outputs.Image(type="pil")
+
+ # Option 1: Get an image from local drive
+ gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, examples=['raccoon1.jpg','raccoon2.jpg'])
+
+ # # Option 2: Grab an image from a webcam
+ # gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=False)
+
+ # # Option 3: Continuous image stream from the webcam
+ # gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=True)
+
+
+ gr_interface.launch(inline=False, share=False, debug=True)
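
Note: inside show_preds the new code passes valid_tfms and class_map to models.torchvision.faster_rcnn.end2end_detect, but neither name is defined anywhere in the committed app.py, so they are presumably provided elsewhere in the Space. A minimal sketch of how they might be defined with IceVision, assuming a single-class raccoon detector and a 384-px inference size (both are assumptions, not part of this commit):

# Hypothetical definitions, not part of the commit: the diff above uses
# `class_map` and `valid_tfms` without defining them in app.py.
from icevision.all import *

class_map = ClassMap(["raccoon"])   # assumed single "raccoon" class
image_size = 384                    # assumed inference resolution
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(image_size), tfms.A.Normalize()])

The app also relies on the legacy gr.inputs / gr.outputs namespaces, which newer Gradio releases no longer ship, so it presumably targets the Gradio version available when the Space was built.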