Serjesh committed
Commit 04bb9a2 · 1 parent: 0a298d1
Adding required files
Files changed:
- Dockerfile +20 -0
- chainlit.md +4 -0
- data/KingLear.txt +0 -0
- lutil/__init__.py +0 -0
- lutil/aimakerspace/__init__.py +0 -0
- lutil/aimakerspace/__pycache__/__init__.cpython-311.pyc +0 -0
- lutil/aimakerspace/__pycache__/text_utils.cpython-311.pyc +0 -0
- lutil/aimakerspace/__pycache__/vectordatabase.cpython-311.pyc +0 -0
- lutil/aimakerspace/openai_utils/__init__.py +0 -0
- lutil/aimakerspace/openai_utils/__pycache__/__init__.cpython-311.pyc +0 -0
- lutil/aimakerspace/openai_utils/__pycache__/chatmodel.cpython-311.pyc +0 -0
- lutil/aimakerspace/openai_utils/__pycache__/embedding.cpython-311.pyc +0 -0
- lutil/aimakerspace/openai_utils/__pycache__/prompts.cpython-311.pyc +0 -0
- lutil/aimakerspace/openai_utils/chatmodel.py +27 -0
- lutil/aimakerspace/openai_utils/embedding.py +52 -0
- lutil/aimakerspace/openai_utils/prompts.py +75 -0
- lutil/aimakerspace/text_utils.py +77 -0
- lutil/aimakerspace/vectordatabase.py +81 -0
- src/app/README.md +13 -0
- src/app/app.py +56 -0
- src/app/raq_qa_reterieval.py +48 -0
- src/app/raq_qa_reterieval_wandb.py +93 -0
- src/app/requirements.txt +10 -0
Dockerfile
ADDED
@@ -0,0 +1,20 @@
FROM python:3.9
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
RUN mkdir -p $HOME/src/app $HOME/lutil $HOME/data
COPY --chown=user /src/app/*.py $HOME/src/app/
COPY --chown=user /src/app/*.txt $HOME/src/app/
COPY --chown=user *.md $HOME/src/app/
RUN ls -r $HOME/src/app
COPY --chown=user lutil/ $HOME/lutil/
COPY --chown=user data/KingLear.txt $HOME/data/

WORKDIR $HOME/src/app
RUN ls -r $HOME/src/app $HOME/lutil $HOME/data
# COPY --chown=user . $HOME/src/app
# COPY ./requirements.txt ~/app/requirements.txt
RUN pip install -r requirements.txt
COPY . .
CMD ["chainlit", "run", "app.py", "--port", "7860"]
chainlit.md
ADDED
@@ -0,0 +1,4 @@
# RAG PURE PYTHON

This app implements RAG using pure Python functions.
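For orientation, a minimal sketch of the retrieval flow the modules below implement (class names and paths are from this commit; the query is illustrative, and it assumes OPENAI_API_KEY is set and the script runs from the repo root):

```python
import asyncio
import sys

sys.path.append("lutil")  # the modules import each other as aimakerspace.*
from aimakerspace.text_utils import TextFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase

# Load King Lear, chunk it, embed every chunk, then retrieve by similarity.
documents = TextFileLoader("data/KingLear.txt").load_documents()
chunks = CharacterTextSplitter().split_texts(documents)
db = asyncio.run(VectorDatabase().abuild_from_list(chunks))
print(db.search_by_text("Who is Cordelia?", k=2, return_as_text=True))
```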
data/KingLear.txt
ADDED
The diff for this file is too large to render.
See raw diff
lutil/__init__.py
ADDED
File without changes

lutil/aimakerspace/__init__.py
ADDED
File without changes

lutil/aimakerspace/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (263 Bytes).

lutil/aimakerspace/__pycache__/text_utils.cpython-311.pyc
ADDED
Binary file (5.49 kB).

lutil/aimakerspace/__pycache__/vectordatabase.cpython-311.pyc
ADDED
Binary file (5.78 kB).

lutil/aimakerspace/openai_utils/__init__.py
ADDED
File without changes

lutil/aimakerspace/openai_utils/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (276 Bytes).

lutil/aimakerspace/openai_utils/__pycache__/chatmodel.cpython-311.pyc
ADDED
Binary file (1.78 kB).

lutil/aimakerspace/openai_utils/__pycache__/embedding.cpython-311.pyc
ADDED
Binary file (3.48 kB).

lutil/aimakerspace/openai_utils/__pycache__/prompts.cpython-311.pyc
ADDED
Binary file (5.53 kB).
lutil/aimakerspace/openai_utils/chatmodel.py
ADDED
@@ -0,0 +1,27 @@
import openai
from dotenv import load_dotenv
import os

load_dotenv()


class ChatOpenAI:
    def __init__(self, model_name: str = "gpt-3.5-turbo"):
        self.model_name = model_name
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if self.openai_api_key is None:
            raise ValueError("OPENAI_API_KEY is not set")

    def run(self, messages, text_only: bool = True):
        if not isinstance(messages, list):
            raise ValueError("messages must be a list")

        openai.api_key = self.openai_api_key
        response = openai.ChatCompletion.create(
            model=self.model_name, messages=messages
        )

        if text_only:
            return response.choices[0].message.content

        return response
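A minimal usage sketch for the ChatOpenAI wrapper above (assumes OPENAI_API_KEY is set and the pinned openai==0.28.0 API; the messages are illustrative):

```python
import sys

sys.path.append("lutil")  # run from the repo root
from aimakerspace.openai_utils.chatmodel import ChatOpenAI

chat = ChatOpenAI()  # defaults to gpt-3.5-turbo
reply = chat.run([
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "Name one Shakespeare tragedy."},
])
print(reply)  # with text_only=True (the default) this is just the message text
```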
lutil/aimakerspace/openai_utils/embedding.py
ADDED
@@ -0,0 +1,52 @@
from dotenv import load_dotenv
from openai.embeddings_utils import (
    get_embeddings,
    aget_embeddings,
    get_embedding,
    aget_embedding,
)
import openai
from typing import List
import os
import asyncio


class EmbeddingModel:
    def __init__(self, embeddings_model_name: str = "text-embedding-ada-002"):
        load_dotenv()
        self.openai_api_key = os.getenv("OPENAI_API_KEY")

        if self.openai_api_key is None:
            raise ValueError(
                "OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key."
            )
        openai.api_key = self.openai_api_key
        self.embeddings_model_name = embeddings_model_name

    async def async_get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
        return await aget_embeddings(
            list_of_text=list_of_text, engine=self.embeddings_model_name
        )

    async def async_get_embedding(self, text: str) -> List[float]:
        return await aget_embedding(text=text, engine=self.embeddings_model_name)

    def get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
        return get_embeddings(
            list_of_text=list_of_text, engine=self.embeddings_model_name
        )

    def get_embedding(self, text: str) -> List[float]:
        return get_embedding(text=text, engine=self.embeddings_model_name)


if __name__ == "__main__":
    embedding_model = EmbeddingModel()
    print(embedding_model.get_embedding("Hello, world!"))
    print(embedding_model.get_embeddings(["Hello, world!", "Goodbye, world!"]))
    print(asyncio.run(embedding_model.async_get_embedding("Hello, world!")))
    print(
        asyncio.run(
            embedding_model.async_get_embeddings(["Hello, world!", "Goodbye, world!"])
        )
    )
lutil/aimakerspace/openai_utils/prompts.py
ADDED
@@ -0,0 +1,75 @@
import re


class BasePrompt:
    def __init__(self, prompt):
        """
        Initializes the BasePrompt object with a prompt template.

        :param prompt: A string that can contain placeholders within curly braces
        """
        self.prompt = prompt
        self._pattern = re.compile(r"\{([^}]+)\}")

    def format_prompt(self, **kwargs):
        """
        Formats the prompt string using the keyword arguments provided.

        :param kwargs: The values to substitute into the prompt string
        :return: The formatted prompt string
        """
        matches = self._pattern.findall(self.prompt)
        return self.prompt.format(**{match: kwargs.get(match, "") for match in matches})

    def get_input_variables(self):
        """
        Gets the list of input variable names from the prompt string.

        :return: List of input variable names
        """
        return self._pattern.findall(self.prompt)


class RolePrompt(BasePrompt):
    def __init__(self, prompt, role: str):
        """
        Initializes the RolePrompt object with a prompt template and a role.

        :param prompt: A string that can contain placeholders within curly braces
        :param role: The role for the message ('system', 'user', or 'assistant')
        """
        super().__init__(prompt)
        self.role = role

    def create_message(self, **kwargs):
        """
        Creates a message dictionary with a role and a formatted message.

        :param kwargs: The values to substitute into the prompt string
        :return: Dictionary containing the role and the formatted message
        """
        return {"role": self.role, "content": self.format_prompt(**kwargs)}


class SystemRolePrompt(RolePrompt):
    def __init__(self, prompt: str):
        super().__init__(prompt, "system")


class UserRolePrompt(RolePrompt):
    def __init__(self, prompt: str):
        super().__init__(prompt, "user")


class AssistantRolePrompt(RolePrompt):
    def __init__(self, prompt: str):
        super().__init__(prompt, "assistant")


if __name__ == "__main__":
    prompt = BasePrompt("Hello {name}, you are {age} years old")
    print(prompt.format_prompt(name="John", age=30))

    prompt = SystemRolePrompt("Hello {name}, you are {age} years old")
    print(prompt.create_message(name="John", age=30))
    print(prompt.get_input_variables())
lutil/aimakerspace/text_utils.py
ADDED
@@ -0,0 +1,77 @@
import os
from typing import List


class TextFileLoader:
    def __init__(self, path: str, encoding: str = "utf-8"):
        self.documents = []
        self.path = path
        self.encoding = encoding

    def load(self):
        if os.path.isdir(self.path):
            self.load_directory()
        elif os.path.isfile(self.path) and self.path.endswith(".txt"):
            self.load_file()
        else:
            raise ValueError(
                "Provided path is neither a valid directory nor a .txt file."
            )

    def load_file(self):
        with open(self.path, "r", encoding=self.encoding) as f:
            self.documents.append(f.read())

    def load_directory(self):
        for root, _, files in os.walk(self.path):
            for file in files:
                if file.endswith(".txt"):
                    with open(
                        os.path.join(root, file), "r", encoding=self.encoding
                    ) as f:
                        self.documents.append(f.read())

    def load_documents(self):
        self.load()
        return self.documents


class CharacterTextSplitter:
    def __init__(
        self,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
    ):
        assert (
            chunk_size > chunk_overlap
        ), "Chunk size must be greater than chunk overlap"

        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    def split(self, text: str) -> List[str]:
        chunks = []
        for i in range(0, len(text), self.chunk_size - self.chunk_overlap):
            chunks.append(text[i : i + self.chunk_size])
        return chunks

    def split_texts(self, texts: List[str]) -> List[str]:
        chunks = []
        for text in texts:
            chunks.extend(self.split(text))
        return chunks


if __name__ == "__main__":
    loader = TextFileLoader("data/KingLear.txt")
    loader.load()
    splitter = CharacterTextSplitter()
    chunks = splitter.split_texts(loader.documents)
    print(len(chunks))
    print(chunks[0])
    print("--------")
    print(chunks[1])
    print("--------")
    print(chunks[-2])
    print("--------")
    print(chunks[-1])
lutil/aimakerspace/vectordatabase.py
ADDED
@@ -0,0 +1,81 @@
import numpy as np
from collections import defaultdict
from typing import List, Tuple, Callable
from aimakerspace.openai_utils.embedding import EmbeddingModel
import asyncio


def cosine_similarity(vector_a: np.array, vector_b: np.array) -> float:
    """Computes the cosine similarity between two vectors."""
    dot_product = np.dot(vector_a, vector_b)
    norm_a = np.linalg.norm(vector_a)
    norm_b = np.linalg.norm(vector_b)
    return dot_product / (norm_a * norm_b)


class VectorDatabase:
    def __init__(self, embedding_model: EmbeddingModel = None):
        self.vectors = defaultdict(np.array)
        self.embedding_model = embedding_model or EmbeddingModel()

    def insert(self, key: str, vector: np.array) -> None:
        self.vectors[key] = vector

    def search(
        self,
        query_vector: np.array,
        k: int,
        distance_measure: Callable = cosine_similarity,
    ) -> List[Tuple[str, float]]:
        scores = [
            (key, distance_measure(query_vector, vector))
            for key, vector in self.vectors.items()
        ]
        return sorted(scores, key=lambda x: x[1], reverse=True)[:k]

    def search_by_text(
        self,
        query_text: str,
        k: int,
        distance_measure: Callable = cosine_similarity,
        return_as_text: bool = False,
    ) -> List[Tuple[str, float]]:
        query_vector = self.embedding_model.get_embedding(query_text)
        results = self.search(query_vector, k, distance_measure)
        return [result[0] for result in results] if return_as_text else results

    def retrieve_from_key(self, key: str) -> np.array:
        return self.vectors.get(key, None)

    async def abuild_from_list(self, list_of_text: List[str]) -> "VectorDatabase":
        embeddings = await self.embedding_model.async_get_embeddings(list_of_text)
        for text, embedding in zip(list_of_text, embeddings):
            self.insert(text, np.array(embedding))
        return self


if __name__ == "__main__":
    list_of_text = [
        "I like to eat broccoli and bananas.",
        "I ate a banana and spinach smoothie for breakfast.",
        "Chinchillas and kittens are cute.",
        "My sister adopted a kitten yesterday.",
        "Look at this cute hamster munching on a piece of broccoli.",
    ]

    vector_db = VectorDatabase()
    vector_db = asyncio.run(vector_db.abuild_from_list(list_of_text))
    k = 2

    searched_vector = vector_db.search_by_text("I think fruit is awesome!", k=k)
    print(f"Closest {k} vector(s):", searched_vector)

    retrieved_vector = vector_db.retrieve_from_key(
        "I like to eat broccoli and bananas."
    )
    print("Retrieved vector:", retrieved_vector)

    relevant_texts = vector_db.search_by_text(
        "I think fruit is awesome!", k=k, return_as_text=True
    )
    print(f"Closest {k} text(s):", relevant_texts)
src/app/README.md
ADDED
@@ -0,0 +1,13 @@
# SIMPLE CHATGPT CONTAINERISED ON HUGGING FACE

Basic RAG application in Python.

## Docker commands to build, push, and run the app

```bash
docker build . -t serarm1/llmops-aispacemaker:rag_pure_python
docker push serarm1/llmops-aispacemaker:rag_pure_python
docker run -p 7860:7860 -e OPENAI_API_KEY=$OPENAI_API_KEY -e WANDB_API_KEY=$WANDB_API_KEY serarm1/llmops-aispacemaker:rag_pure_python
```

[Hugging Face App: SIMPLE CHAINLIT APP](https://huggingface.co/spaces/Serjesh/simple_chainlit_app)
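The run command above expands $OPENAI_API_KEY and $WANDB_API_KEY from the host shell. A minimal sketch of keeping them in a local .env file instead of shell history (python-dotenv is already pinned in requirements.txt, and app.py calls load_dotenv()):

```python
import os

from dotenv import load_dotenv

load_dotenv()  # reads OPENAI_API_KEY / WANDB_API_KEY from a local .env file
openai_key = os.environ["OPENAI_API_KEY"]  # raises KeyError if missing
wandb_key = os.environ["WANDB_API_KEY"]
```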
src/app/app.py
ADDED
@@ -0,0 +1,56 @@
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
import sys
import os

sys.path.append('../../lutil')
import openai  # importing openai for API usage
import chainlit as cl  # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
from dotenv import load_dotenv
from aimakerspace.text_utils import TextFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase
import asyncio
from raq_qa_reterieval_wandb import RetrievalAugmentedQAPipeline, raqa_prompt, user_prompt
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
import wandb

load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
os.environ["WANDB_API_KEY"]  # raises KeyError early if the wandb key is missing


@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    msg = cl.Message(
        content="Loading Dataset ...", disable_human_feedback=True
    )
    await msg.send()
    text_loader = TextFileLoader("../../data/KingLear.txt")
    documents = text_loader.load_documents()
    text_splitter = CharacterTextSplitter()
    split_documents = text_splitter.split_texts(documents)
    vector_db = VectorDatabase()
    # await directly: asyncio.run() would fail inside this already-running event loop
    vector_db = await vector_db.abuild_from_list(split_documents)
    chat_openai = ChatOpenAI()
    retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
        vector_db_retriever=vector_db,
        llm=chat_openai,
        wandb_project="RAQ ins pure python")
    msg.content = "Dataset loading is done. You can now ask questions!"
    await msg.update()
    cl.user_session.set("chain", retrieval_augmented_qa_pipeline)


@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: str):
    # settings = cl.user_session.get("settings")
    chain = cl.user_session.get("chain")

    output = chain.run_pipeline(message)
    print(output)
    msg = cl.Message(content=f"{output}")
    # msg.prompt = output
    await msg.send()
src/app/raq_qa_reterieval.py
ADDED
@@ -0,0 +1,48 @@
import sys

sys.path.append('../../lutil/')
from aimakerspace.openai_utils.prompts import (
    UserRolePrompt,
    SystemRolePrompt,
    AssistantRolePrompt,
)
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
from aimakerspace.vectordatabase import VectorDatabase

RAQA_PROMPT_TEMPLATE = """
Use the provided context to answer the user's query.

You may not answer the user's query unless there is specific context in the following text.

If you do not know the answer, or cannot answer, please respond with "I don't know".

Context:
{context}
"""

raqa_prompt = SystemRolePrompt(RAQA_PROMPT_TEMPLATE)

USER_PROMPT_TEMPLATE = """
User Query:
{user_query}
"""

user_prompt = UserRolePrompt(USER_PROMPT_TEMPLATE)


class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    def run_pipeline(self, user_query: str) -> str:
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        context_prompt = ""
        for context in context_list:
            context_prompt += context[0] + "\n"

        formatted_system_prompt = raqa_prompt.create_message(context=context_prompt)

        formatted_user_prompt = user_prompt.create_message(user_query=user_query)

        return self.llm.run([formatted_system_prompt, formatted_user_prompt])
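A minimal sketch of driving the pipeline above outside Chainlit, run from src/app/ so the module's sys.path tweak resolves (the query is illustrative; assumes OPENAI_API_KEY is set):

```python
import asyncio
import sys

sys.path.append("../../lutil")  # run from src/app/ so this resolves to <repo>/lutil
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
from aimakerspace.text_utils import TextFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase
from raq_qa_reterieval import RetrievalAugmentedQAPipeline

# Build the retriever the same way app.py does (path relative to src/app/).
chunks = CharacterTextSplitter().split_texts(
    TextFileLoader("../../data/KingLear.txt").load_documents()
)
vector_db = asyncio.run(VectorDatabase().abuild_from_list(chunks))

pipeline = RetrievalAugmentedQAPipeline(llm=ChatOpenAI(), vector_db_retriever=vector_db)
print(pipeline.run_pipeline("What happens to King Lear at the end?"))
```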
src/app/raq_qa_reterieval_wandb.py
ADDED
@@ -0,0 +1,93 @@
import sys

sys.path.append('../../lutil/')
from aimakerspace.openai_utils.prompts import (
    UserRolePrompt,
    SystemRolePrompt,
    AssistantRolePrompt,
)
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
from aimakerspace.vectordatabase import VectorDatabase
import datetime
from wandb.sdk.data_types.trace_tree import Trace
from wandb import init as winit

RAQA_PROMPT_TEMPLATE = """
Use the provided context to answer the user's query.

You may not answer the user's query unless there is specific context in the following text.

If you do not know the answer, or cannot answer, please respond with "I don't know".

Context:
{context}
"""

raqa_prompt = SystemRolePrompt(RAQA_PROMPT_TEMPLATE)

USER_PROMPT_TEMPLATE = """
User Query:
{user_query}
"""

user_prompt = UserRolePrompt(USER_PROMPT_TEMPLATE)


class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase, wandb_project=None) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever
        self.wandb_project = wandb_project
        if self.wandb_project:  # only start a wandb run when a project is given
            winit(project=self.wandb_project)

    def run_pipeline(self, user_query: str) -> str:
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        context_prompt = ""
        for context in context_list:
            context_prompt += context[0] + "\n"

        formatted_system_prompt = raqa_prompt.create_message(context=context_prompt)

        formatted_user_prompt = user_prompt.create_message(user_query=user_query)

        start_time = datetime.datetime.now().timestamp() * 1000

        try:
            openai_response = self.llm.run([formatted_system_prompt, formatted_user_prompt], text_only=False)
            end_time = datetime.datetime.now().timestamp() * 1000
            status = "success"
            status_message = None
            response_text = openai_response.choices[0].message.content
            token_usage = openai_response["usage"].to_dict()
            model = openai_response["model"]

        except Exception as e:
            end_time = datetime.datetime.now().timestamp() * 1000
            status = "error"
            status_message = str(e)
            response_text = ""
            token_usage = {}
            model = ""

        if self.wandb_project:
            root_span = Trace(
                name="root_span",
                kind="llm",
                status_code=status,
                status_message=status_message,
                start_time_ms=start_time,
                end_time_ms=end_time,
                metadata={
                    "token_usage": token_usage,
                    "model_name": model
                },
                inputs={"system_prompt": formatted_system_prompt, "user_prompt": formatted_user_prompt},
                outputs={"response": response_text}
            )

            root_span.log(name="openai_trace")

        return response_text if response_text else f"We ran into an error. Please try again later. Full Error Message: {status_message}"
src/app/requirements.txt
ADDED
@@ -0,0 +1,10 @@
chainlit==0.7.0
openai==0.28.0
python-dotenv==1.0.0
wandb
numpy
pandas
matplotlib
plotly
scipy
scikit-learn