KonradSzafer committed on
Commit
f9b4b24
·
1 Parent(s): cd726a4

llmlingua optimization

Browse files
Files changed (1) hide show
  1. qa_engine/qa_engine.py +3 -0
qa_engine/qa_engine.py CHANGED
@@ -86,6 +86,9 @@ class TransformersPipelineModel(LLM):
86
  )
87
 
88
  def _call(self, prompt: str, stop: Optional[list[str]] = None) -> str:
 
 
 
89
  output_text = self.pipeline(prompt)[0]['generated_text']
90
  output_text = output_text.replace(prompt+'\n', '')
91
  return output_text
 
86
  )
87
 
88
  def _call(self, prompt: str, stop: Optional[list[str]] = None) -> str:
89
+ from llmlingua import PromptCompressor
90
+ llm_lingua = PromptCompressor()
91
+ prompt = llm_lingua.compress_prompt(prompt, instruction="", question="", target_token=400)
92
  output_text = self.pipeline(prompt)[0]['generated_text']
93
  output_text = output_text.replace(prompt+'\n', '')
94
  return output_text