Spaces:
Sleeping
Sleeping
Update lawchain.py
Browse files- lawchain.py +1 -1
lawchain.py
CHANGED
@@ -20,7 +20,7 @@ persist_directory = 'db'
|
|
20 |
instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
|
21 |
embedding = instructor_embeddings
|
22 |
#tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0")
|
23 |
-
tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0",use_fast=False)
|
24 |
model = AutoModelForSeq2SeqLM.from_pretrained("lmsys/fastchat-t5-3b-v1.0")
|
25 |
pipe = pipeline("text2text-generation",model=model, tokenizer=tokenizer,max_new_tokens=200)
|
26 |
local_llm = HuggingFacePipeline(pipeline=pipe)
|
|
|
20 |
instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
|
21 |
embedding = instructor_embeddings
|
22 |
#tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0")
|
23 |
+
tokenizer = AutoTokenizer.from_pretrained("lmsys/fastchat-t5-3b-v1.0",use_fast=False, legacy=False)
|
24 |
model = AutoModelForSeq2SeqLM.from_pretrained("lmsys/fastchat-t5-3b-v1.0")
|
25 |
pipe = pipeline("text2text-generation",model=model, tokenizer=tokenizer,max_new_tokens=200)
|
26 |
local_llm = HuggingFacePipeline(pipeline=pipe)
|