Saiteja Solleti committed on
Commit
14249c4
·
1 Parent(s): 041c22e

Revert "adding token limit"

Browse files

This reverts commit 041c22e9286bedbf5173c91cc4afa6ebc1b41eea.

Files changed (1) hide show
  1. formatresultshelper.py +0 -3
formatresultshelper.py CHANGED
@@ -1,6 +1,5 @@
1
  import generationhelper
2
  import json
3
- from generationhelper import Enforce_token_limit
4
 
5
  def evaluate_response_with_prompt(templete, query, documents, answer, eval_model="llama-3.3-70b-specdec"):
6
 
@@ -16,8 +15,6 @@ def evaluate_response_with_prompt(templete, query, documents, answer, eval_model
16
 
17
  prompt = templete.format(documents=formatted_documents, question=query, answer=formatted_answer)
18
 
19
- Enforce_token_limit(prompt)
20
-
21
  # Call the LLM API (Llama 3.3-70B)
22
  completion = generationhelper.groq_client.chat.completions.create(
23
  model=eval_model,
 
1
  import generationhelper
2
  import json
 
3
 
4
  def evaluate_response_with_prompt(templete, query, documents, answer, eval_model="llama-3.3-70b-specdec"):
5
 
 
15
 
16
  prompt = templete.format(documents=formatted_documents, question=query, answer=formatted_answer)
17
 
 
 
18
  # Call the LLM API (Llama 3.3-70B)
19
  completion = generationhelper.groq_client.chat.completions.create(
20
  model=eval_model,