suchinth08 committed
Commit 47055fe · verified · 1 Parent(s): ee13ae6

Update lawchain.py

Files changed (1)
  1. lawchain.py +2 -2
lawchain.py CHANGED
@@ -20,9 +20,9 @@ persist_directory = 'db'
  instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
  embedding = instructor_embeddings
  #tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0")
- tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0",use_fast=False, legacy=False)
+ tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0",use_fast=True, legacy=False)
  model = AutoModelForSeq2SeqLM.from_pretrained("lmsys/fastchat-t5-3b-v1.0")
- pipe = pipeline("text2text-generation",model=model, tokenizer=tokenizer,max_new_tokens=300)
+ pipe = pipeline("text2text-generation",model=model, tokenizer=tokenizer,max_new_tokens=200)
  local_llm = HuggingFacePipeline(pipeline=pipe)
  vectordb = Chroma(persist_directory=persist_directory,embedding_function=embedding)
  retriever = vectordb.as_retriever(search_kwargs={"k": 3})