Technocoloredgeek committed on
Commit
6121e00
·
verified ·
1 Parent(s): e496e5b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -14
app.py CHANGED
@@ -3,11 +3,12 @@ import os
3
  from langchain_community.document_loaders import PyMuPDFLoader
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
6
- from langchain_community.vectorstores import Qdrant
7
  from langchain.prompts import ChatPromptTemplate
8
- from langchain.schema.output_parser import StrOutputParser
9
- from langchain.schema.runnable import RunnablePassthrough
10
  from qdrant_client import QdrantClient
 
11
  from operator import itemgetter
12
 
13
  # Set up API keys
@@ -37,20 +38,30 @@ def load_and_process_pdfs(pdf_links):
37
 
38
  @st.cache_resource
39
  def setup_vectorstore():
40
- qdrant_client = QdrantClient(":memory:")
41
- embeddings = OpenAIEmbeddings()
42
-
43
  COLLECTION_NAME = "AI_Ethics_Framework"
44
- vector_store = Qdrant(
45
- client=qdrant_client,
 
 
 
 
46
  collection_name=COLLECTION_NAME,
47
- embeddings=embeddings
48
  )
49
-
 
 
 
 
 
 
 
 
50
  documents = load_and_process_pdfs(pdf_links)
51
- vector_store.add_documents(documents)
52
-
53
- return vector_store
54
 
55
  @st.cache_resource
56
  def create_rag_pipeline(vector_store):
@@ -79,7 +90,7 @@ def create_rag_pipeline(vector_store):
79
  """
80
 
81
  prompt = ChatPromptTemplate.from_template(template)
82
- primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Note: Changed from gpt-4o-mini to gpt-4
83
 
84
  retrieval_augmented_qa_chain = (
85
  {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
@@ -89,6 +100,7 @@ def create_rag_pipeline(vector_store):
89
 
90
  return retrieval_augmented_qa_chain
91
 
 
92
  st.title("Ask About AI Ethics!")
93
 
94
  vector_store = setup_vectorstore()
 
3
  from langchain_community.document_loaders import PyMuPDFLoader
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
6
+ from langchain_qdrant import QdrantVectorStore
7
  from langchain.prompts import ChatPromptTemplate
8
+ from langchain_core.output_parsers import StrOutputParser
9
+ from langchain_core.runnables import RunnablePassthrough
10
  from qdrant_client import QdrantClient
11
+ from qdrant_client.http.models import Distance, VectorParams
12
  from operator import itemgetter
13
 
14
  # Set up API keys
 
38
 
39
  @st.cache_resource
40
  def setup_vectorstore():
41
+ LOCATION = ":memory:"
 
 
42
  COLLECTION_NAME = "AI_Ethics_Framework"
43
+ VECTOR_SIZE = 1536
44
+
45
+ qdrant_client = QdrantClient(location=LOCATION)
46
+
47
+ # Create the collection
48
+ qdrant_client.create_collection(
49
  collection_name=COLLECTION_NAME,
50
+ vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
51
  )
52
+
53
+ # Create the vector store
54
+ qdrant_vector_store = QdrantVectorStore(
55
+ client=qdrant_client,
56
+ collection_name=COLLECTION_NAME,
57
+ embedding=OpenAIEmbeddings()
58
+ )
59
+
60
+ # Load and add documents
61
  documents = load_and_process_pdfs(pdf_links)
62
+ qdrant_vector_store.add_documents(documents)
63
+
64
+ return qdrant_vector_store
65
 
66
  @st.cache_resource
67
  def create_rag_pipeline(vector_store):
 
90
  """
91
 
92
  prompt = ChatPromptTemplate.from_template(template)
93
+ primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0)
94
 
95
  retrieval_augmented_qa_chain = (
96
  {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
 
100
 
101
  return retrieval_augmented_qa_chain
102
 
103
+ # Streamlit UI
104
  st.title("Ask About AI Ethics!")
105
 
106
  vector_store = setup_vectorstore()