spdraptor committed on
Commit eebe067 · 1 Parent(s): ed51f1c
modules/__pycache__/masking_module.cpython-310.pyc CHANGED
Binary files a/modules/__pycache__/masking_module.cpython-310.pyc and b/modules/__pycache__/masking_module.cpython-310.pyc differ
 
modules/masking_module.py CHANGED
@@ -137,6 +137,7 @@ def masking_process(image,obj):
     # task_prompt = '<REGION_TO_SEGMENTATION>'
     # # task_prompt = '<OPEN_VOCABULARY_DETECTION>'
     # print(type(task_prompt),type(obj))
+    print('1')
     image = Image.fromarray(image).convert("RGB")
 
     # results = florence2(image,task_prompt, text_input=obj)
@@ -152,13 +153,17 @@ def masking_process(image,obj):
     # obj = "Tiger"
 
     Florence_results = florence2(image,task_prompt, text_input=obj)
+    print('2')
     SAM_IMAGE_MODEL = load_sam_image_model(device=device)
+    print('3')
     detections = sv.Detections.from_lmm(
         lmm=sv.LMM.FLORENCE_2,
         result=Florence_results,
         resolution_wh=image.size
     )
+    print('4')
     response = run_sam_inference(SAM_IMAGE_MODEL, image, detections)
+    print('5')
     if response['code'] == 400:
         return response
     else:
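
The added print('1') through print('5') calls are numbered trace points around each stage of the pipeline in masking_process: Florence-2 detection via florence2, converting the result with supervision's Detections.from_lmm, and running SAM via run_sam_inference. A minimal usage sketch follows, assuming modules.masking_module is importable from the repository root and that masking_process takes a NumPy image array plus an object label and returns a dict carrying a 'code' field on failure, as the hunk suggests; the image path is a placeholder and "Tiger" mirrors the commented-out example in the diff.

# Minimal sketch, not part of the commit; input path and label are placeholders.
import numpy as np
from PIL import Image

from modules.masking_module import masking_process

# Load any RGB image as a NumPy array; masking_process converts it back to PIL internally.
image = np.array(Image.open("example.jpg"))

# Ask the Florence-2 + SAM pipeline to segment the named object.
response = masking_process(image, "Tiger")

if isinstance(response, dict) and response.get("code") == 400:
    print("masking failed:", response)
else:
    print("masking succeeded")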