Kevin Hu committed

Commit c530565 · 1 Parent(s): bb139f2

refine context length (#1813)


### What problem does this PR solve?

#1594
### Type of change

- [x] Performance Improvement

Files changed (1)

1. graphrag/index.py +1 -1
graphrag/index.py CHANGED
```diff
@@ -68,7 +68,7 @@ def build_knowlege_graph_chunks(tenant_id: str, chunks: List[str], callback, ent
     llm_bdl = LLMBundle(tenant_id, LLMType.CHAT)
     ext = GraphExtractor(llm_bdl)
     left_token_count = llm_bdl.max_length - ext.prompt_token_count - 1024
-    left_token_count = llm_bdl.max_length * 0.4
+    left_token_count = max(llm_bdl.max_length * 0.8, left_token_count)
 
     assert left_token_count > 0, f"The LLM context length({llm_bdl.max_length}) is smaller than prompt({ext.prompt_token_count})"
 
```
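As a quick illustration of the budget arithmetic this one-line change affects, here is a minimal sketch with hypothetical numbers (a context window of 8192 tokens and a 2000-token extraction prompt). The variable names mirror `graphrag/index.py`, but the values and the standalone script are illustrative, not part of the PR.

```python
# Minimal sketch of the token-budget change, using hypothetical values.
max_length = 8192          # hypothetical LLM context window (llm_bdl.max_length)
prompt_token_count = 2000  # hypothetical prompt size (ext.prompt_token_count)

# Base budget: context window minus prompt, minus a 1024-token safety margin.
left_token_count = max_length - prompt_token_count - 1024  # 5168

# Before this PR: the base budget was unconditionally overwritten,
# capping chunk input at 40% of the window regardless of prompt size.
old_budget = max_length * 0.4  # 3276.8

# After this PR: keep whichever is larger, 80% of the window or the base budget,
# so chunk text gets more of the available context.
new_budget = max(max_length * 0.8, left_token_count)  # 6553.6

print(f"old: {old_budget}, new: {new_budget}")
```

With these sample numbers the usable chunk budget roughly doubles, which appears to be the intent behind the "refine context length" title; the existing assert still guards the case where the prompt alone exceeds the context window.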