James MacQuillan committed on
Commit
fca837c
·
1 Parent(s): fa01583
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -20,11 +20,11 @@ import numpy as np
20
  from sklearn.manifold import TSNE
21
  import plotly.graph_objects as go
22
 
23
- from langchain.document_loaders import DirectoryLoader, TextLoader
24
  from langchain.text_splitter import CharacterTextSplitter
25
  from langchain.schema import Document
26
  import chromadb.utils.embedding_functions as embedding_functions
27
- from langchain.embeddings import HuggingFaceEmbeddings
28
 
29
  hf_token = os.getenv('HF_TOKEN')
30
  huggingface_ef = embedding_functions.HuggingFaceEmbeddingFunction(
@@ -52,7 +52,7 @@ search_splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=2)
52
  parts = search_splitter.split_text(search_requirements_text)
53
 
54
 
55
- search_documents = [Document(page_content=chunk) for chunk in chunks]
56
 
57
  # Initialize Chroma with documents and embeddings
58
  search_vectorstore = Chroma.from_documents(
 
20
  from sklearn.manifold import TSNE
21
  import plotly.graph_objects as go
22
 
23
+ from langchain_community.document_loaders import TextLoader
24
  from langchain.text_splitter import CharacterTextSplitter
25
  from langchain.schema import Document
26
  import chromadb.utils.embedding_functions as embedding_functions
27
+ from langchain_community.embeddings import HuggingFaceEmbeddings
28
 
29
  hf_token = os.getenv('HF_TOKEN')
30
  huggingface_ef = embedding_functions.HuggingFaceEmbeddingFunction(
 
52
  parts = search_splitter.split_text(search_requirements_text)
53
 
54
 
55
+ search_documents = [Document(page_content=chunk) for chunk in parts]
56
 
57
  # Initialize Chroma with documents and embeddings
58
  search_vectorstore = Chroma.from_documents(