Update app.py
app.py
CHANGED
@@ -22,8 +22,8 @@ import chainlit as cl
 load_dotenv()
 
 
-
-
+document = PyMuPDFLoader(file_path="sk-proj-eIwEnyq0pSHm0s3yiMdZT3BlbkFJ2aeAlVApXY6jveE8aJ9t").load()
+
 
 
 def metadata_generator(document, name):
@@ -37,22 +37,20 @@ def metadata_generator(document, name):
         doc.metadata["source"] = name
     return collection
 
-
-recursive_blueprint_document = metadata_generator(ai_blueprint_document, "AI Blueprint")
-combined_documents = recursive_framework_document + recursive_blueprint_document
+documents = metadata_generator(document, "Propaganda")
 
 embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
 
 vectorstore = Qdrant.from_documents(
-    documents=
+    documents=documents,
     embedding=embeddings,
     location=":memory:",
-    collection_name="
+    collection_name="Propaganda"
 )
 alt_retriever = vectorstore.as_retriever()
 
 ## Generation LLM
-llm = ChatOpenAI(model="gpt-4o
+llm = ChatOpenAI(model="gpt-4o")
 
 RAG_PROMPT = """\
 You are an AI Policy Expert.
@@ -79,7 +77,7 @@ retrieval_augmented_qa_chain = (
     | {"response": rag_prompt | llm, "context": itemgetter("context")}
 )
 
-
+
 
 @cl.on_message
 async def handle_message(message):