Saif Rehman Nasir committed
Commit 9618bfc · 1 Parent(s): a26ec13

Update rag

Files changed (1)
  1. rag.py +3 -3
rag.py CHANGED
@@ -14,7 +14,7 @@ from langchain_huggingface import HuggingFaceEndpoint
 
 from typing import Dict, Any
 from tqdm import tqdm
-from transformers import LlamaTokenizer
+from transformers import AutoTokenizer
 
 NEO4J_URI = os.getenv("NEO4J_URI")
 NEO4J_USERNAME = os.getenv("NEO4J_USERNAME")
@@ -28,7 +28,7 @@ chat_llm = HuggingFaceEndpoint(
     do_sample=False,
 )
 
-global_tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+global_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
 
 
 def local_retriever(query: str):
@@ -277,7 +277,7 @@ def global_retriever(query: str, level: int, response_type: str):
 
     ###Debug####
 
-    tokens = global_tokenizer.encode(intermediate_results)
+    tokens = global_tokenizer(intermediate_results)
     print(f"Number of input tokens: {len(tokens)}")
     ###Debug###
     final_response = reduce_chain.invoke(
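
For reference, a minimal sketch (not part of the commit) of how token counting behaves with transformers' AutoTokenizer: calling the tokenizer directly returns a dict-like BatchEncoding, so the token count is the length of its input_ids, whereas tokenizer.encode() returns the id list directly. The sample text below is a placeholder, and access to the gated meta-llama/Meta-Llama-3-8B-Instruct repo on the Hub is assumed.

from transformers import AutoTokenizer

# Assumes access to the gated meta-llama repo on the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

text = "placeholder text"  # hypothetical stand-in for intermediate_results

# Calling the tokenizer returns a BatchEncoding (dict-like: input_ids, attention_mask),
# so len() of the encoding counts its keys rather than tokens.
encoding = tokenizer(text)
print(f"Number of input tokens: {len(encoding['input_ids'])}")

# tokenizer.encode() returns the list of token ids directly,
# so len() of its result is the token count.
ids = tokenizer.encode(text)
print(f"Number of input tokens: {len(ids)}")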