Saif Rehman Nasir committed
Commit bdf0a45
Parent(s): 9aaf62d

Update output parameters of generator and retriever LLM

Files changed (2):
  1. app.py +1 -1
  2. rag.py +1 -1
app.py CHANGED
@@ -35,7 +35,7 @@ def respond(
 
     for message in client.chat_completion(
         messages,
-        max_tokens=2048,
+        max_tokens=6192,
         stream=True,
         temperature=1.0,
         top_p=top_p,
rag.py CHANGED
@@ -23,7 +23,7 @@ vector_index = os.getenv("VECTOR_INDEX")
 chat_llm = HuggingFaceEndpoint(
     repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
     task="text-generation",
-    max_new_tokens=6000,
+    max_new_tokens=2048,
     do_sample=False,
 )
 
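
For context, a minimal sketch of the two call sites after this commit, assuming app.py streams the generator LLM through huggingface_hub.InferenceClient and rag.py builds the retriever LLM with langchain_huggingface.HuggingFaceEndpoint; only the parameter values shown in the diff come from the commit, while the surrounding names (client, respond, messages, top_p) are illustrative assumptions.

# Hypothetical reconstruction of the two updated call sites; only the
# parameter values come from the commit, the rest is assumed scaffolding.
from huggingface_hub import InferenceClient
from langchain_huggingface import HuggingFaceEndpoint

client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")

def respond(messages, top_p=0.95):
    # Generator LLM: output budget raised from 2048 to 6192 tokens.
    for message in client.chat_completion(
        messages,
        max_tokens=6192,
        stream=True,
        temperature=1.0,
        top_p=top_p,
    ):
        yield message.choices[0].delta.content

# Retriever LLM: output budget lowered from 6000 to 2048 new tokens.
chat_llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
    task="text-generation",
    max_new_tokens=2048,
    do_sample=False,
)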