zhichyu committed
Commit 563b0d7 · 1 Parent(s): 6101699

handle_task catch all exception (#3441)


### What problem does this PR solve?

- `handle_task` now catches all exceptions, so a single failing task can no longer break the executor loop; failures are counted and the queue payload is always acknowledged.
- Each task executor registers itself in Redis and reports periodic heartbeats, which the system status endpoint now returns (see the sketch below).
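
A minimal sketch of the heartbeat flow, using plain `redis-py` (the project goes through its `REDIS_CONN` wrapper, whose `zadd(key, member, score)` signature differs; key and field names are taken from this diff, the executor id is hypothetical):

```python
import json
from datetime import datetime

import redis  # assumption: a bare redis-py client stands in for REDIS_CONN

r = redis.Redis(decode_responses=True)
CONSUMER_NAME = "task_consumer_0"  # hypothetical executor id

# Executor side (report_status in rag/svr/task_executor.py): register the consumer,
# then append one heartbeat per loop iteration to a per-consumer sorted set,
# scored by the wall-clock timestamp.
r.sadd("TASKEXE", CONSUMER_NAME)
now = datetime.now()
heartbeat = json.dumps({"name": CONSUMER_NAME, "now": now.isoformat(),
                        "done": 0, "failed": 0, "pending": 0, "lag": 0})
r.zadd(CONSUMER_NAME, {heartbeat: now.timestamp()})

# Status side (status() in api/apps/system_app.py): list registered executors and
# pull each one's heartbeats from the last 30 minutes.
ts = datetime.now().timestamp()
for executor_id in r.smembers("TASKEXE"):
    beats = r.zrangebyscore(executor_id, ts - 60 * 30, ts)
    print(executor_id, f"{len(beats)} heartbeat(s) in the last 30 minutes")
```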

### Type of change

- [x] Refactoring

api/apps/system_app.py CHANGED
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #
-import json
+import logging
 from datetime import datetime
 
 from flask_login import login_required, current_user
@@ -154,26 +154,16 @@ def status():
             "error": str(e),
         }
 
+    task_executor_heartbeats = {}
     try:
-        v = REDIS_CONN.get("TASKEXE")
-        if not v:
-            raise Exception("No task executor running!")
-        obj = json.loads(v)
-        color = "green"
-        for id in obj.keys():
-            arr = obj[id]
-            if len(arr) == 1:
-                obj[id] = [0]
-            else:
-                obj[id] = [arr[i + 1] - arr[i] for i in range(len(arr) - 1)]
-            elapsed = max(obj[id])
-            if elapsed > 50:
-                color = "yellow"
-            if elapsed > 120:
-                color = "red"
-        res["task_executor"] = {"status": color, "elapsed": obj}
-    except Exception as e:
-        res["task_executor"] = {"status": "red", "error": str(e)}
+        task_executors = REDIS_CONN.smembers("TASKEXE")
+        now = datetime.now().timestamp()
+        for task_executor_id in task_executors:
+            heartbeats = REDIS_CONN.zrangebyscore(task_executor_id, now - 60*30, now)
+            task_executor_heartbeats[task_executor_id] = heartbeats
+    except Exception:
+        logging.exception("get task executor heartbeats failed!")
+    res["task_executor_heartbeats"] = task_executor_heartbeats
 
     return get_json_result(data=res)
 
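The old block computed a green/yellow/red status server-side; the new endpoint returns the raw heartbeats and leaves interpretation to the caller. A rough client-side sketch, assuming the response shape shown in this diff (each list entry is the JSON string written by `report_status` in `rag/svr/task_executor.py`):

```python
import json
from datetime import datetime

def seconds_since_last_heartbeat(task_executor_heartbeats: dict) -> dict:
    """Map executor id -> seconds since its newest heartbeat (None if it has none)."""
    now = datetime.now().timestamp()
    out = {}
    for executor_id, beats in task_executor_heartbeats.items():
        if not beats:
            out[executor_id] = None
            continue
        newest = json.loads(beats[-1])  # zrangebyscore returns members in ascending score order
        out[executor_id] = now - datetime.fromisoformat(newest["now"]).timestamp()
    return out
```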
api/db/services/task_service.py CHANGED
@@ -36,7 +36,7 @@ class TaskService(CommonService):
 
     @classmethod
     @DB.connection_context()
-    def get_tasks(cls, task_id):
+    def get_task(cls, task_id):
         fields = [
             cls.model.id,
             cls.model.doc_id,
@@ -63,7 +63,7 @@ class TaskService(CommonService):
             .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
             .where(cls.model.id == task_id)
         docs = list(docs.dicts())
-        if not docs: return []
+        if not docs: return None
 
         msg = "\nTask has been received."
         prog = random.random() / 10.
@@ -77,9 +77,9 @@ class TaskService(CommonService):
             ).where(
                 cls.model.id == docs[0]["id"]).execute()
 
-        if docs[0]["retry_count"] >= 3: return []
+        if docs[0]["retry_count"] >= 3: return None
 
-        return docs
+        return docs[0]
 
     @classmethod
     @DB.connection_context()
@@ -108,7 +108,7 @@ class TaskService(CommonService):
             task = cls.model.get_by_id(id)
            _, doc = DocumentService.get_by_id(task.doc_id)
             return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
-        except Exception as e:
+        except Exception:
             pass
         return False
 
rag/svr/task_executor.py CHANGED
@@ -42,7 +42,6 @@ from multiprocessing.context import TimeoutError
 from timeit import default_timer as timer
 
 import numpy as np
-import pandas as pd
 
 from api.db import LLMType, ParserType
 from api.db.services.dialog_service import keyword_extraction, question_proposal
@@ -85,10 +84,9 @@ CONSUMER_NAME = "task_consumer_" + CONSUMER_NO
 PAYLOAD: Payload | None = None
 BOOT_AT = datetime.now().isoformat()
 DONE_TASKS = 0
-RETRY_TASKS = 0
+FAILED_TASKS = 0
 PENDING_TASKS = 0
-HEAD_CREATED_AT = ""
-HEAD_DETAIL = ""
+LAG_TASKS = 0
 
 
 def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing..."):
@@ -120,34 +118,35 @@ def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing...
 
 
 def collect():
-    global CONSUMER_NAME, PAYLOAD
+    global CONSUMER_NAME, PAYLOAD, DONE_TASKS, FAILED_TASKS
     try:
         PAYLOAD = REDIS_CONN.get_unacked_for(CONSUMER_NAME, SVR_QUEUE_NAME, "rag_flow_svr_task_broker")
         if not PAYLOAD:
             PAYLOAD = REDIS_CONN.queue_consumer(SVR_QUEUE_NAME, "rag_flow_svr_task_broker", CONSUMER_NAME)
         if not PAYLOAD:
             time.sleep(1)
-            return pd.DataFrame()
+            return None
     except Exception:
         logging.exception("Get task event from queue exception")
-        return pd.DataFrame()
+        return None
 
     msg = PAYLOAD.get_message()
     if not msg:
-        return pd.DataFrame()
+        return None
 
     if TaskService.do_cancel(msg["id"]):
+        DONE_TASKS += 1
         logging.info("Task {} has been canceled.".format(msg["id"]))
-        return pd.DataFrame()
-    tasks = TaskService.get_tasks(msg["id"])
-    if not tasks:
+        return None
+    task = TaskService.get_task(msg["id"])
+    if not task:
+        DONE_TASKS += 1
         logging.warning("{} empty task!".format(msg["id"]))
-        return []
+        return None
 
-    tasks = pd.DataFrame(tasks)
     if msg.get("type", "") == "raptor":
-        tasks["task_type"] = "raptor"
-    return tasks
+        task["task_type"] = "raptor"
+    return task
 
 
 def get_storage_binary(bucket, name):
@@ -176,14 +175,14 @@ def build(row):
         callback(-1, "Internal server error: Fetch file from minio timeout. Could you try it again.")
         logging.exception(
             "Minio {}/{} got timeout: Fetch file from minio timeout.".format(row["location"], row["name"]))
-        return
+        raise
     except Exception as e:
         if re.search("(No such file|not found)", str(e)):
             callback(-1, "Can not find file <%s> from minio. Could you try it again?" % row["name"])
         else:
             callback(-1, "Get file from minio: %s" % str(e).replace("'", ""))
         logging.exception("Chunking {}/{} got exception".format(row["location"], row["name"]))
-        return
+        raise
 
     try:
         cks = chunker.chunk(row["name"], binary=binary, from_page=row["from_page"],
@@ -194,7 +193,7 @@
         callback(-1, "Internal server error while chunking: %s" %
                  str(e).replace("'", ""))
         logging.exception("Chunking {}/{} got exception".format(row["location"], row["name"]))
-        return
+        raise
 
     docs = []
     doc = {
@@ -212,6 +211,7 @@
         d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
         d["create_timestamp_flt"] = datetime.now().timestamp()
         if not d.get("image"):
+            _ = d.pop("image", None)
            d["img_id"] = ""
            d["page_num_list"] = json.dumps([])
            d["position_list"] = json.dumps([])
@@ -232,6 +232,7 @@
         except Exception:
             logging.exception(
                 "Saving image of chunk {}/{}/{} got exception".format(row["location"], row["name"], d["_id"]))
+            raise
 
         d["img_id"] = "{}-{}".format(row["kb_id"], d["id"])
         del d["image"]
@@ -356,105 +357,111 @@ def run_raptor(row, chat_mdl, embd_mdl, callback=None):
     return res, tk_count, vector_size
 
 
-def main():
-    rows = collect()
-    if len(rows) == 0:
-        return
-
-    for _, r in rows.iterrows():
-        callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
+def do_handle_task(r):
+    callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
+    try:
+        embd_mdl = LLMBundle(r["tenant_id"], LLMType.EMBEDDING, llm_name=r["embd_id"], lang=r["language"])
+    except Exception as e:
+        callback(-1, msg=str(e))
+        raise
+    if r.get("task_type", "") == "raptor":
         try:
-            embd_mdl = LLMBundle(r["tenant_id"], LLMType.EMBEDDING, llm_name=r["embd_id"], lang=r["language"])
+            chat_mdl = LLMBundle(r["tenant_id"], LLMType.CHAT, llm_name=r["llm_id"], lang=r["language"])
+            cks, tk_count, vector_size = run_raptor(r, chat_mdl, embd_mdl, callback)
         except Exception as e:
             callback(-1, msg=str(e))
-            logging.exception("LLMBundle got exception")
-            continue
-
-        if r.get("task_type", "") == "raptor":
-            try:
-                chat_mdl = LLMBundle(r["tenant_id"], LLMType.CHAT, llm_name=r["llm_id"], lang=r["language"])
-                cks, tk_count, vector_size = run_raptor(r, chat_mdl, embd_mdl, callback)
-            except Exception as e:
-                callback(-1, msg=str(e))
-                logging.exception("run_raptor got exception")
-                continue
-        else:
-            st = timer()
-            cks = build(r)
-            logging.info("Build chunks({}): {}".format(r["name"], timer() - st))
-            if cks is None:
-                continue
-            if not cks:
-                callback(1., "No chunk! Done!")
-                continue
-            # TODO: exception handler
-            ## set_progress(r["did"], -1, "ERROR: ")
-            callback(
+            raise
+    else:
+        st = timer()
+        cks = build(r)
+        logging.info("Build chunks({}): {}".format(r["name"], timer() - st))
+        if cks is None:
+            return
+        if not cks:
+            callback(1., "No chunk! Done!")
+            return
+        # TODO: exception handler
+        ## set_progress(r["did"], -1, "ERROR: ")
+        callback(
             msg="Finished slicing files ({} chunks in {:.2f}s). Start to embedding the content.".format(len(cks),
                                                                                                          timer() - st)
-            )
-            st = timer()
-            try:
-                tk_count, vector_size = embedding(cks, embd_mdl, r["parser_config"], callback)
-            except Exception as e:
-                callback(-1, "Embedding error:{}".format(str(e)))
-                logging.exception("run_rembedding got exception")
-                tk_count = 0
-            logging.info("Embedding elapsed({}): {:.2f}".format(r["name"], timer() - st))
-            callback(msg="Finished embedding (in {:.2f}s)! Start to build index!".format(timer() - st))
-
-        # logging.info(f"task_executor init_kb index {search.index_name(r["tenant_id"])} embd_mdl {embd_mdl.llm_name} vector length {vector_size}")
-        init_kb(r, vector_size)
-        chunk_count = len(set([c["id"] for c in cks]))
+        )
         st = timer()
-        es_r = ""
-        es_bulk_size = 4
-        for b in range(0, len(cks), es_bulk_size):
-            es_r = settings.docStoreConn.insert(cks[b:b + es_bulk_size], search.index_name(r["tenant_id"]), r["kb_id"])
-            if b % 128 == 0:
-                callback(prog=0.8 + 0.1 * (b + 1) / len(cks), msg="")
-
-        logging.info("Indexing elapsed({}): {:.2f}".format(r["name"], timer() - st))
-        if es_r:
-            callback(-1, "Insert chunk error, detail info please check log file. Please also check ES status!")
-            settings.docStoreConn.delete({"doc_id": r["doc_id"]}, search.index_name(r["tenant_id"]), r["kb_id"])
-            logging.error('Insert chunk error: ' + str(es_r))
-        else:
-            if TaskService.do_cancel(r["id"]):
-                settings.docStoreConn.delete({"doc_id": r["doc_id"]}, search.index_name(r["tenant_id"]), r["kb_id"])
-                continue
-            callback(msg="Indexing elapsed in {:.2f}s.".format(timer() - st))
-            callback(1., "Done!")
-            DocumentService.increment_chunk_num(
-                r["doc_id"], r["kb_id"], tk_count, chunk_count, 0)
-            logging.info(
-                "Chunk doc({}), token({}), chunks({}), elapsed:{:.2f}".format(
-                    r["id"], tk_count, len(cks), timer() - st))
+        try:
+            tk_count, vector_size = embedding(cks, embd_mdl, r["parser_config"], callback)
+        except Exception as e:
+            callback(-1, "Embedding error:{}".format(str(e)))
+            logging.exception("run_rembedding got exception")
+            tk_count = 0
+            raise
+        logging.info("Embedding elapsed({}): {:.2f}".format(r["name"], timer() - st))
+        callback(msg="Finished embedding (in {:.2f}s)! Start to build index!".format(timer() - st))
+    # logging.info(f"task_executor init_kb index {search.index_name(r["tenant_id"])} embd_mdl {embd_mdl.llm_name} vector length {vector_size}")
+    init_kb(r, vector_size)
+    chunk_count = len(set([c["id"] for c in cks]))
+    st = timer()
+    es_r = ""
+    es_bulk_size = 4
+    for b in range(0, len(cks), es_bulk_size):
+        es_r = settings.docStoreConn.insert(cks[b:b + es_bulk_size], search.index_name(r["tenant_id"]), r["kb_id"])
+        if b % 128 == 0:
+            callback(prog=0.8 + 0.1 * (b + 1) / len(cks), msg="")
+    logging.info("Indexing elapsed({}): {:.2f}".format(r["name"], timer() - st))
+    if es_r:
+        callback(-1, "Insert chunk error, detail info please check log file. Please also check Elasticsearch/Infinity status!")
+        settings.docStoreConn.delete({"doc_id": r["doc_id"]}, search.index_name(r["tenant_id"]), r["kb_id"])
+        logging.error('Insert chunk error: ' + str(es_r))
+        raise Exception('Insert chunk error: ' + str(es_r))
+
+    if TaskService.do_cancel(r["id"]):
+        settings.docStoreConn.delete({"doc_id": r["doc_id"]}, search.index_name(r["tenant_id"]), r["kb_id"])
+        return
+
+    callback(msg="Indexing elapsed in {:.2f}s.".format(timer() - st))
+    callback(1., "Done!")
+    DocumentService.increment_chunk_num(
+        r["doc_id"], r["kb_id"], tk_count, chunk_count, 0)
+    logging.info(
+        "Chunk doc({}), token({}), chunks({}), elapsed:{:.2f}".format(
+            r["id"], tk_count, len(cks), timer() - st))
+
+
+def handle_task():
+    global PAYLOAD, DONE_TASKS, FAILED_TASKS
+    task = collect()
+    if task:
+        try:
+            logging.info(f"handle_task begin for task {json.dumps(task)}")
+            do_handle_task(task)
+            DONE_TASKS += 1
+            logging.exception(f"handle_task done for task {json.dumps(task)}")
+        except Exception:
+            FAILED_TASKS += 1
+            logging.exception(f"handle_task got exception for task {json.dumps(task)}")
+    if PAYLOAD:
+        PAYLOAD.ack()
+        PAYLOAD = None
 
 
 def report_status():
-    global CONSUMER_NAME, BOOT_AT, DONE_TASKS, RETRY_TASKS, PENDING_TASKS, HEAD_CREATED_AT, HEAD_DETAIL
+    global CONSUMER_NAME, BOOT_AT, DONE_TASKS, FAILED_TASKS, PENDING_TASKS, LAG_TASKS
     REDIS_CONN.sadd("TASKEXE", CONSUMER_NAME)
     while True:
         try:
             now = datetime.now()
-            PENDING_TASKS = REDIS_CONN.queue_length(SVR_QUEUE_NAME)
-            if PENDING_TASKS > 0:
-                head_info = REDIS_CONN.queue_head(SVR_QUEUE_NAME)
-                if head_info is not None:
-                    seconds = int(head_info[0].split("-")[0]) / 1000
-                    HEAD_CREATED_AT = datetime.fromtimestamp(seconds).isoformat()
-                    HEAD_DETAIL = head_info[1]
+            group_info = REDIS_CONN.queue_info(SVR_QUEUE_NAME, "rag_flow_svr_task_broker")
+            if group_info is not None:
+                PENDING_TASKS = int(group_info["pending"])
+                LAG_TASKS = int(group_info["lag"])
 
             heartbeat = json.dumps({
                 "name": CONSUMER_NAME,
                 "now": now.isoformat(),
                 "boot_at": BOOT_AT,
                 "done": DONE_TASKS,
-                "retry": RETRY_TASKS,
+                "failed": FAILED_TASKS,
                 "pending": PENDING_TASKS,
-                "head_created_at": HEAD_CREATED_AT,
-                "head_detail": HEAD_DETAIL,
+                "lag": LAG_TASKS,
             })
             REDIS_CONN.zadd(CONSUMER_NAME, heartbeat, now.timestamp())
             logging.info(f"{CONSUMER_NAME} reported heartbeat: {heartbeat}")
@@ -466,14 +473,13 @@ def report_status():
             logging.exception("report_status got exception")
         time.sleep(30)
 
-
-if __name__ == "__main__":
+def main():
     background_thread = threading.Thread(target=report_status)
     background_thread.daemon = True
     background_thread.start()
 
     while True:
-        main()
-        if PAYLOAD:
-            PAYLOAD.ack()
-            PAYLOAD = None
+        handle_task()
+
+if __name__ == "__main__":
+    main()
rag/utils/redis_conn.py CHANGED
@@ -225,14 +225,16 @@ class RedisDB:
             logging.exception("xpending_range: " + consumer_name + " got exception")
             self.__open__()
 
-    def queue_length(self, queue) -> int:
+    def queue_info(self, queue, group_name) -> dict:
         for _ in range(3):
             try:
-                num = self.REDIS.xlen(queue)
-                return num
+                groups = self.REDIS.xinfo_groups(queue)
+                for group in groups:
+                    if group["name"] == group_name:
+                        return group
             except Exception:
                 logging.exception("queue_length" + str(queue) + " got exception")
-        return 0
+        return None
 
     def queue_head(self, queue) -> int:
         for _ in range(3):
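
`queue_info` builds on redis-py's `xinfo_groups`, which returns one dict per consumer group on the stream; the `pending` and `lag` fields read by `report_status` come straight from the Redis `XINFO GROUPS` reply (`lag` is only reported by Redis 7.0+). A quick interactive check, assuming a local Redis and the group name used in this PR (the exact stream key is an assumption):

```python
import redis  # assumption: the RedisDB wrapper is built on redis-py

r = redis.Redis(decode_responses=True)
for group in r.xinfo_groups("rag_flow_svr_queue"):  # stands in for SVR_QUEUE_NAME
    if group["name"] == "rag_flow_svr_task_broker":
        # "pending": delivered but not yet acknowledged entries;
        # "lag": entries added to the stream but not yet delivered (Redis >= 7.0)
        print(group["pending"], group.get("lag"))
```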