# NOTE(review): removed non-Python scrape residue that preceded the code
# (GitHub blob-UI leftovers: a "File size" line, commit hashes such as
# b085e51/ed4ca74, and a 1-124 line-number gutter). None of it was source.
import re
from langchain_openai import OpenAIEmbeddings
from langchain_openai import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings

from langchain.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import StrOutputParser

from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import Qdrant

from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain_core.documents import Document

from operator import itemgetter
import os
from dotenv import load_dotenv
import chainlit as cl
from langchain.embeddings.base import Embeddings
from sentence_transformers import SentenceTransformer

# Load environment variables from a local .env file (e.g. OPENAI_API_KEY,
# which ChatOpenAI below reads implicitly).
load_dotenv()

# Custom wrapper for SentenceTransformer to work with Langchain
class LangchainSentenceTransformerEmbeddings(Embeddings):
    """Adapter exposing a SentenceTransformer model via LangChain's Embeddings interface.

    LangChain's ``Embeddings`` contract is plain Python lists of floats, but
    ``SentenceTransformer.encode`` returns a numpy ndarray by default — so both
    methods convert with ``.tolist()`` to honor the declared return types and
    keep the vectors JSON-serializable.
    """

    def __init__(self, model_name: str):
        # Loads (downloading on first use) the named SentenceTransformer model.
        self.model = SentenceTransformer(model_name)

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed a batch of texts; returns one vector per input text."""
        return self.model.encode(texts).tolist()

    def embed_query(self, text: str) -> list[float]:
        """Embed a single query string into one vector."""
        return self.model.encode([text])[0].tolist()

# Initialize the custom embedding model (fine-tuned arctic-sentence model,
# fetched from the Hugging Face hub on first run).
embedding_model = LangchainSentenceTransformerEmbeddings("Cheselle/finetuned-arctic-sentence")

# Load the two source PDFs over HTTP and parse them into per-page Documents.
# NOTE(review): network fetch at import time — the module fails to import offline.
ai_framework_document = PyMuPDFLoader(file_path="https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf").load()
ai_blueprint_document = PyMuPDFLoader(file_path="https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf").load()

# Metadata generator function to add metadata to documents
def metadata_generator(document, name, chunk_size=500, chunk_overlap=100):
    """Split *document* into chunks and tag each chunk with a source name.

    Args:
        document: List of loaded Documents to split.
        name: Value stored under the ``"source"`` metadata key on every chunk.
        chunk_size: Maximum characters per chunk (default 500, the original
            hard-coded value).
        chunk_overlap: Characters shared between adjacent chunks (default 100).

    Returns:
        The list of chunked Documents, each with ``metadata["source"] == name``.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        # Split preferentially on paragraph, then line, then sentence boundaries.
        separators=["\n\n", "\n", ".", "!", "?"],
    )
    chunks = splitter.split_documents(document)
    for chunk in chunks:
        chunk.metadata["source"] = name
    return chunks

# Chunk both corpora, tagging each chunk with the document it came from,
# then pool them into the single list fed to the vectorstore below.
recursive_framework_document = metadata_generator(ai_framework_document, "AI Framework")
recursive_blueprint_document = metadata_generator(ai_blueprint_document, "AI Blueprint")
combined_documents = recursive_framework_document + recursive_blueprint_document

# Full (unchunked) text of each document, concatenated page by page.
# NOTE(review): these two strings are not referenced anywhere later in this file.
ai_framework_text = "".join(doc.page_content for doc in ai_framework_document)
ai_blueprint_text = "".join(doc.page_content for doc in ai_blueprint_document)

# Build an in-memory Qdrant vectorstore from the chunked documents, embedding
# each chunk with the custom SentenceTransformer wrapper defined above.
vectorstore = Qdrant.from_documents(
    documents=combined_documents,  # chunked framework + blueprint documents
    embedding=embedding_model,     # Custom Langchain wrapper for SentenceTransformer
    location=":memory:",           # ephemeral store — rebuilt on every process start
    collection_name="ai_policy"
)

# Expose the vectorstore as a retriever (default similarity-search settings).
retriever = vectorstore.as_retriever()

# LLM configuration (API key comes from the environment loaded by load_dotenv()).
llm = ChatOpenAI(model="gpt-4o-mini")

# Define the RAG (Retrieval-Augmented Generation) prompt template.
# {context} and {question} are filled in by the chain at query time.
RAG_PROMPT = """\
You are an AI Policy Expert. 
Given a provided context and question, you must answer the question based only on context. 
Think through your answer carefully and step by step. 
Context: {context}
Question: {question}
"""

rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)

# Define the retrieval-augmented QA chain:
#   1. fan the incoming question out into retrieved context + the question itself;
#   2. feed both to the prompt+LLM, returning the response alongside the context.
# The previous version interposed RunnablePassthrough.assign(context=itemgetter("context")),
# which reassigned "context" to itself — a no-op, removed here. RunnableParallel
# (already imported) replaces the leading dict literal, since without a Runnable
# adjacent to it a bare `dict | dict` would be Python dict union, not a chain.
retrieval_augmented_qa_chain = (
    RunnableParallel(
        context=itemgetter("question") | retriever,
        question=itemgetter("question"),
    )
    | {"response": rag_prompt | llm, "context": itemgetter("context")}
)

# Chainlit event handler for receiving messages
@cl.on_message
async def handle_message(message):
    """Answer an incoming chat message via the RAG chain and reply in-channel."""
    try:
        # Use the async entry point so the event loop is not blocked while the
        # retriever and LLM calls are in flight (the sync .invoke() would stall
        # every other connected chat session for the duration of the request).
        result = await retrieval_augmented_qa_chain.ainvoke({"question": message.content})

        # Send the model's answer text back to the user.
        await cl.Message(content=result["response"].content).send()

    except Exception as e:
        # Top-level handler boundary: surface the failure to the user and log it.
        await cl.Message(content=f"An error occurred: {str(e)}").send()
        print(f"Error occurred: {e}")

# Run the Chainlit server
# NOTE(review): Chainlit apps are normally launched with `chainlit run <file>`,
# in which case this guard never fires; `cl.run()` does not appear to be a
# public Chainlit API — confirm this direct entry point actually works (any
# AttributeError it raises is swallowed by the except below and only printed).
if __name__ == "__main__":
    try:
        cl.run()
    except Exception as e:
        print(f"Server error occurred: {e}")