MSEAJYTHTH committed on
Commit 63d6d47 · verified · 1 Parent(s): 1ddfe5e

Update app.py

Files changed (1)
  1. app.py +2 -5
app.py CHANGED
@@ -1,7 +1,7 @@
 from langchain.document_loaders import HuggingFaceDatasetLoader
 from langchain_community.document_loaders.csv_loader import CSVLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
-from langchain.embeddings import HuggingFaceEmbeddings
+from langchain_community.embeddings import HuggingFaceEmbeddings
 from transformers import AutoTokenizer, AutoModelForQuestionAnswering
 from transformers import AutoTokenizer, pipeline
 from langchain import HuggingFacePipeline
@@ -19,9 +19,6 @@ import difflib
 modelPath = "MSEAJYTHTH/NFPC"
 # db_files = ["MSEAJYTHTH/NFPC/index.faiss", "MSEAJYTHTH/NFPC/index.pkl"]
 
-index = FAISS.read_index('MSEAJYTHTH/NFPC/index.faiss')
-
-
 model_kwargs = {'device':'cpu'}
 
 encode_kwargs = {'normalize_embeddings': False}
@@ -34,7 +31,7 @@ embeddings = HuggingFaceEmbeddings(
 
 
 # After uploading the two files, the code can be updated as follows.
-db = FAISS.load_local(index, embeddings, allow_dangerous_deserialization=True)
+db = FAISS.load_local("index.faiss", embeddings, allow_dangerous_deserialization=True)
 
 
 def find_best_page_content(question, keywords, db):
 
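For context, here is a minimal runnable sketch of what the loading path looks like after this commit. It assumes index.faiss and index.pkl have been downloaded next to the script; the folder path "." and the sample query below are placeholders, not part of the commit. One caveat worth noting: in langchain_community, FAISS.load_local takes the folder that contains index.faiss and index.pkl (plus an optional index_name), so passing "index.faiss" as the first argument only works if a directory by that name holds the two files.

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Embedding settings mirrored from app.py in this repo.
embeddings = HuggingFaceEmbeddings(
    model_name="MSEAJYTHTH/NFPC",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": False},
)

# load_local expects the folder holding index.faiss and index.pkl;
# "." is a placeholder for wherever the two files were downloaded.
# allow_dangerous_deserialization opts in to unpickling index.pkl.
db = FAISS.load_local(
    ".",
    embeddings,
    allow_dangerous_deserialization=True,
)

# Placeholder query to confirm the index loads and retrieves chunks.
docs = db.similarity_search("sprinkler installation requirements", k=3)
print(docs[0].page_content)

allow_dangerous_deserialization=True is needed because index.pkl is a pickle file, which can execute arbitrary code when loaded; it should only be enabled for index files you created yourself or otherwise trust.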