MikeCraBash committed on
Commit 1340ce0 · 1 Parent(s): 85af65f
Files changed (3)
  1. app.py +84 -120
  2. pyproject.toml +18 -9
  3. uv.lock +0 -0
app.py CHANGED
@@ -1,139 +1,103 @@
 
 
  import os
- from typing import List
- from chainlit.types import AskFileResponse
- from aimakerspace.text_utils import CharacterTextSplitter, TextFileLoader, PDFLoader
- from aimakerspace.openai_utils.prompts import (
-     UserRolePrompt,
-     SystemRolePrompt,
-     AssistantRolePrompt,
  )
- from aimakerspace.openai_utils.embedding import EmbeddingModel
- from aimakerspace.vectordatabase import VectorDatabase
- from aimakerspace.openai_utils.chatmodel import ChatOpenAI
- import chainlit as cl
-
- system_template = """\
- Use the following context to answer a users question. If you cannot find the answer in the context, say you don't know the answer."""
- system_role_prompt = SystemRolePrompt(system_template)
-
- user_prompt_template = """\
  Context:
- {context}
-
- Question:
- {question}
  """
- user_role_prompt = UserRolePrompt(user_prompt_template)
-
- class RetrievalAugmentedQAPipeline:
-     def __init__(self, llm: ChatOpenAI(), vector_db_retriever: VectorDatabase) -> None:
-         self.llm = llm
-         self.vector_db_retriever = vector_db_retriever
-
-     async def arun_pipeline(self, user_query: str):
-         context_list = self.vector_db_retriever.search_by_text(user_query, k=4)
-
-         context_prompt = ""
-         for context in context_list:
-             context_prompt += context[0] + "\n"
-
-         formatted_system_prompt = system_role_prompt.create_message()
-
-         formatted_user_prompt = user_role_prompt.create_message(question=user_query, context=context_prompt)
-
-         async def generate_response():
-             async for chunk in self.llm.astream([formatted_system_prompt, formatted_user_prompt]):
-                 yield chunk
-
-         return {"response": generate_response(), "context": context_list}
-
- text_splitter = CharacterTextSplitter()
-
-
- def process_file(file: AskFileResponse):
-     import tempfile
-     import shutil
-
-     print(f"Processing file: {file.name}")
-
-     # Create a temporary file with the correct extension
-     suffix = f".{file.name.split('.')[-1]}"
-     with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
-         # Copy the uploaded file content to the temporary file
-         shutil.copyfile(file.path, temp_file.name)
-         print(f"Created temporary file at: {temp_file.name}")
-
-     # Create appropriate loader
-     if file.name.lower().endswith('.pdf'):
-         loader = PDFLoader(temp_file.name)
-     else:
-         loader = TextFileLoader(temp_file.name)
-
-     try:
-         # Load and process the documents
-         documents = loader.load_documents()
-         texts = text_splitter.split_texts(documents)
-         return texts
-     finally:
-         # Clean up the temporary file
-         try:
-             os.unlink(temp_file.name)
-         except Exception as e:
-             print(f"Error cleaning up temporary file: {e}")


- @cl.on_chat_start
- async def on_chat_start():
-     files = None
-
-     # Wait for the user to upload a file
-     while files == None:
-         files = await cl.AskFileMessage(
-             content="Please upload a Text or PDF file to begin!",
-             accept=["text/plain", "application/pdf"],
-             max_size_mb=2,
-             timeout=180,
-         ).send()
-
-     file = files[0]
-
-     msg = cl.Message(
-         content=f"Processing `{file.name}`..."
-     )
-     await msg.send()

-     # load the file
-     texts = process_file(file)

-     print(f"Processing {len(texts)} text chunks")

-     # Create a dict vector store
-     vector_db = VectorDatabase()
-     vector_db = await vector_db.abuild_from_list(texts)
-
-     chat_openai = ChatOpenAI()

-     # Create a chain
-     retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
-         vector_db_retriever=vector_db,
-         llm=chat_openai
-     )
-
-     # Let the user know that the system is ready
-     msg.content = f"Processing `{file.name}` done. You can now ask questions!"
-     await msg.update()

-     cl.user_session.set("chain", retrieval_augmented_qa_pipeline)


- @cl.on_message
- async def main(message):
-     chain = cl.user_session.get("chain")

-     msg = cl.Message(content="")
-     result = await chain.arun_pipeline(message.content)

-     async for stream_resp in result["response"]:
-         await msg.stream_token(stream_resp)

-     await msg.send()
 
+ # Install dependencies before running (shell command, not Python):
+ #   pip install -qU langchain-huggingface langchain-community faiss-cpu huggingface-hub==0.27.0
+
  import os
+ import getpass
+ import chainlit as cl
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()
+
+ YOUR_LLM_ENDPOINT_URL = "https://z1nsc3eoo5nxnoos.us-east-1.aws.endpoints.huggingface.cloud"
+
+ from langchain_huggingface import HuggingFaceEndpoint
+
+ hf_llm = HuggingFaceEndpoint(
+     endpoint_url=f"{YOUR_LLM_ENDPOINT_URL}",
+     task="text-generation",
+     max_new_tokens=512,
+     top_k=10,
+     top_p=0.95,
+     typical_p=0.95,
+     temperature=0.01,
+     repetition_penalty=1.03,
  )
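+ # hf_llm wraps a dedicated Hugging Face Inference Endpoint running a text-generation task;
+ # the near-zero temperature keeps answers close to deterministic for grading/demo purposes.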
 
 
 
 
+ from langchain_core.prompts import PromptTemplate
+
+ RAG_PROMPT_TEMPLATE = """\
+ <|start_header_id|>system<|end_header_id|>
+ You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context, say you don't know.<|eot_id|>
+
+ <|start_header_id|>user<|end_header_id|>
+ User Query:
+ {query}

  Context:
+ {context}<|eot_id|>
+
+ <|start_header_id|>assistant<|end_header_id|>
  """
+
+ rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
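+ # The template above uses Llama-3-style <|start_header_id|>/<|eot_id|> tokens and
+ # declares two input variables, {query} and {context}, which the chain must supply.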
+
+ from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
+
+ YOUR_EMBED_MODEL_URL = "https://jt4esmqgyp7m3fk8.us-east-1.aws.endpoints.huggingface.cloud"
+
+ hf_embeddings = HuggingFaceEndpointEmbeddings(
+     model=YOUR_EMBED_MODEL_URL,
+     task="feature-extraction",
+ )
+
+ # Fetch the corpus before launching the app (shell command, not Python):
+ #   git clone https://github.com/dbredvick/paul-graham-to-kindle.git
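+ # hf_embeddings calls the dedicated feature-extraction endpoint and is used both to
+ # index the corpus below and to embed user queries at retrieval time.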
 
 
 
 
+
+ from langchain_community.document_loaders import TextLoader
+
+ document_loader = TextLoader("./paul-graham-to-kindle/paul_graham_essays.txt")
+ documents = document_loader.load()
+
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
+ split_documents = text_splitter.split_documents(documents)
+ print(f"Split corpus into {len(split_documents)} chunks")
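+ # Roughly 1,000-character chunks with a 30-character overlap; adjust these to trade
+ # retrieval granularity against prompt length.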
+
+ from langchain_community.vectorstores import FAISS
+
+ for i in range(0, len(split_documents), 32):
+     if i == 0:
+         vectorstore = FAISS.from_documents(split_documents[i:i+32], hf_embeddings)
+         continue
+     vectorstore.add_documents(split_documents[i:i+32])
+
+ hf_retriever = vectorstore.as_retriever()
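+ # The loop above builds the FAISS index in batches of 32 so each embedding request stays small.
+ # The retriever could also be tuned, e.g. vectorstore.as_retriever(search_kwargs={"k": 4})
+ # to control how many chunks are returned (k=4 is only an illustrative value).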
+
+ from operator import itemgetter
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain.schema.runnable import RunnablePassthrough
+
+ @cl.on_chat_start
+ async def start_chat():
+     """
+     This function is called at the start of every user session.
+
+     We build our LCEL RAG chain here and store it in the user session.
+
+     The user session is a dictionary unique to each session, kept in the server's memory.
+     """
+     ### BUILD LCEL RAG CHAIN THAT ONLY RETURNS TEXT
+     lcel_rag_chain = (
+         {"context": itemgetter("query") | hf_retriever, "query": itemgetter("query")}
+         | rag_prompt
+         | hf_llm
+     )
+
+     cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
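+     # Storing the chain in cl.user_session lets each chat session fetch its own reference
+     # back in the message handler below.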
+
+ @cl.on_message
+ async def main(message: cl.Message):
+     """
+     This function is called whenever a user sends a message to the bot.
+     """
+     lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
+
+     chainlit_question = message.content
+     # The chain's input key is "query"; HuggingFaceEndpoint returns the generated text as a plain string.
+     chainlit_answer = await lcel_rag_chain.ainvoke({"query": chainlit_question})
+
+     msg = cl.Message(content=chainlit_answer)
+     await msg.send()
 
 
pyproject.toml CHANGED
@@ -1,14 +1,23 @@
  [project]
- name = "aie5-deploypythonicrag"
  version = "0.1.0"
- description = "Simple Pythonic RAG App"
  readme = "README.md"
- requires-python = ">=3.13"
  dependencies = [
-     "chainlit>=2.0.4",
-     "numpy>=2.2.2",
-     "openai>=1.59.9",
-     "pydantic==2.10.1",
-     "pypdf2>=3.0.1",
-     "websockets>=14.2",
  ]
 
 
  [project]
+ name = "15-open-source-endpoints"
  version = "0.1.0"
+ description = "Session 15 - Open Source Endpoints"
  readme = "README.md"
+ requires-python = ">=3.9"
  dependencies = [
+     "asyncio===3.4.3",
+     "chainlit==2.2.1",
+     "huggingface-hub==0.27.0",
+     "langchain-huggingface==0.1.2",
+     "langchain==0.3.19",
+     "langchain-community==0.3.18",
+     "langsmith==0.3.11",
+     "python-dotenv==1.0.1",
+     "tqdm==4.67.1",
+     "langchain-openai==0.3.7",
+     "langchain-text-splitters==0.3.6",
+     "jupyter>=1.1.1",
+     "faiss-cpu>=1.10.0",
+     "websockets>=15.0",
  ]
+
uv.lock CHANGED
The diff for this file is too large to render. See raw diff