kevinconka committed on
Commit
ac783cd
·
1 Parent(s): 9b3bb2e

added debug messages

Browse files
Files changed (2) hide show
  1. app.py +19 -6
  2. flagging.py +2 -0
app.py CHANGED
@@ -7,7 +7,8 @@ Any new model should implement the following functions:
7
 
8
  import os
9
  import glob
10
- #import spaces
 
11
  import gradio as gr
12
  from huggingface_hub import get_token
13
  from utils import (
@@ -46,20 +47,26 @@ h1 {
46
  }
47
  """
48
 
49
- model = load_model("experimental/ahoy6-MIX-1280-b1.onnx")
50
  model.det_conf_thresh = 0.1
51
  model.hor_conf_thresh = 0.1
52
 
 
53
  # @spaces.GPU
54
  def inference(image):
55
  """Run inference on image and return annotated image."""
56
  results = model(image)
57
  return results.draw(image)
58
 
59
- def flag_img_input(image: gr.Image, flag_option: str = "misdetection", username: str = "anonymous"):
 
 
 
60
  """Wrapper for flagging"""
 
61
  return hf_writer.flag([image], flag_option=flag_option, username=username)
62
 
 
63
  # Flagging
64
  dataset_name = "SEA-AI/crowdsourced-sea-images"
65
  hf_writer = HuggingFaceDatasetSaver(get_token(), dataset_name)
@@ -74,7 +81,10 @@ with gr.Blocks(theme=theme, css=css, title="SEA.AI Vision Demo") as demo:
74
  with gr.Row():
75
  with gr.Column():
76
  img_input = gr.Image(
77
- label="input", interactive=True, sources=["upload", "clipboard"]
 
 
 
78
  )
79
  img_url = gr.Textbox(
80
  lines=1,
@@ -140,9 +150,12 @@ with gr.Blocks(theme=theme, css=css, title="SEA.AI Vision Demo") as demo:
140
  [],
141
  preprocess=False,
142
  show_api=True,
143
- api_name="flag_misdetection"
144
  ).then(
145
- lambda: load_badges(flagged_counter.count()), [], badges, show_api=False,
 
 
 
146
  )
147
 
148
  # called during initial load in browser
 
7
 
8
  import os
9
  import glob
10
+
11
+ # import spaces
12
  import gradio as gr
13
  from huggingface_hub import get_token
14
  from utils import (
 
47
  }
48
  """
49
 
50
+ model = load_model("experimental/ahoy6-MIX-1280-b1.onnx")
51
  model.det_conf_thresh = 0.1
52
  model.hor_conf_thresh = 0.1
53
 
54
+
55
  # @spaces.GPU
56
  def inference(image):
57
  """Run inference on image and return annotated image."""
58
  results = model(image)
59
  return results.draw(image)
60
 
61
+
62
+ def flag_img_input(
63
+ image: gr.Image, flag_option: str = "misdetection", username: str = "anonymous"
64
+ ):
65
  """Wrapper for flagging"""
66
+ print(f"{image=}, {flag_option=}, {username=}")
67
  return hf_writer.flag([image], flag_option=flag_option, username=username)
68
 
69
+
70
  # Flagging
71
  dataset_name = "SEA-AI/crowdsourced-sea-images"
72
  hf_writer = HuggingFaceDatasetSaver(get_token(), dataset_name)
 
81
  with gr.Row():
82
  with gr.Column():
83
  img_input = gr.Image(
84
+ label="input",
85
+ interactive=True,
86
+ sources=["upload", "clipboard"],
87
+ type="numpy",
88
  )
89
  img_url = gr.Textbox(
90
  lines=1,
 
150
  [],
151
  preprocess=False,
152
  show_api=True,
153
+ api_name="flag_misdetection",
154
  ).then(
155
+ lambda: load_badges(flagged_counter.count()),
156
+ [],
157
+ badges,
158
+ show_api=False,
159
  )
160
 
161
  # called during initial load in browser
flagging.py CHANGED
@@ -319,7 +319,9 @@ class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
319
  save_dir.mkdir(exist_ok=True, parents=True)
320
  deserialized = component.flag(sample, save_dir)
321
  if isinstance(component, gr.Image) and isinstance(sample, dict):
 
322
  deserialized = json.loads(deserialized)["path"] # dirty hack
 
323
 
324
  # Add deserialized object to row
325
  features[label] = {"dtype": "string", "_type": "Value"}
 
319
  save_dir.mkdir(exist_ok=True, parents=True)
320
  deserialized = component.flag(sample, save_dir)
321
  if isinstance(component, gr.Image) and isinstance(sample, dict):
322
+ print(f"Before dirty hack: {deserialized=}")
323
  deserialized = json.loads(deserialized)["path"] # dirty hack
324
+ print(f"After dirty hack: {deserialized=}")
325
 
326
  # Add deserialized object to row
327
  features[label] = {"dtype": "string", "_type": "Value"}