# Chainlit Python streaming reference: https://docs.chainlit.io/concepts/streaming/python
# OpenAI Chat completion
import os
from openai import AsyncOpenAI # importing openai for API usage
import chainlit as cl # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage # importing prompt tools
#from chainlit.playground.providers import ChatOpenAI # importing ChatOpenAI tools
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
from dotenv import load_dotenv
from chainlit.types import AskFileResponse
from aimakerspace.text_utils import TextFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.rag_utils.raqa import RetrievalAugmentedQAPipeline
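
# load_dotenv() below reads a local .env file; this app assumes the file defines
# OPENAI_API_KEY for the OpenAI client, e.g. a .env containing:
#
#   OPENAI_API_KEY=sk-...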
load_dotenv()

# ChatOpenAI Templates
system_template = """You are a helpful assistant who always speaks in a pleasant tone!
"""
user_template = """{input}
Think through your response step by step.
"""


def transform_file(file: AskFileResponse):
    import tempfile

    # Write the uploaded bytes to a temporary file so the loader can read it from disk.
    # (The original shadowed the `tempfile` module with the file object; renamed here.)
    with tempfile.NamedTemporaryFile(mode="wb", delete=False) as temp_file:
        temp_file.write(file.content)
        temp_path = temp_file.name

    # Load the file and split it into chunks for embedding.
    print(temp_path)
    text_loader_pdf = TextFileLoader(temp_path)
    documents = text_loader_pdf.load_documents()
    text_splitter = CharacterTextSplitter()
    split_documents = text_splitter.split_texts(documents)
    return split_documents
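
# Usage sketch (the file object here is hypothetical, for illustration only):
#
#   chunks = transform_file(uploaded_file)  # -> list of character-split text chunks
#   # each chunk is embedded by VectorDatabase.abuild_from_list in start_chat below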


@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    files = None

    # Wait for the user to upload a file before continuing.
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a PDF file to begin!",
            accept=["application/pdf"],
            max_size_mb=20,
            timeout=180,
        ).send()
    file = files[0]

    msg = cl.Message(
        content=f"Processing `{file.name}`...", disable_human_feedback=True
    )
    await msg.send()

    # Load and chunk the uploaded file.
    documents = transform_file(file)

    # Build the vector database. We are already inside the running event loop,
    # so the coroutine must be awaited rather than passed to asyncio.run().
    vector_uefa_db = VectorDatabase()
    vector_uefa_db = await vector_uefa_db.abuild_from_list(documents)

    retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
        vector_db_retriever=vector_uefa_db,
        llm=ChatOpenAI(),
    )

    # Let the user know that the system is ready.
    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", retrieval_augmented_qa_pipeline)


@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")

    # run_pipeline is called synchronously here, matching the original code;
    # if it is defined as a coroutine in aimakerspace, it should be awaited instead.
    resp = chain.run_pipeline(message.content)

    msg = cl.Message(content=resp["response"])
    await msg.send()
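
# A minimal token-streaming variant (see the Chainlit streaming docs linked at the
# top). `arun_pipeline` is an assumed async generator yielding tokens; it is not part
# of this file, so this sketch is illustrative only and left commented out:
#
# @cl.on_message
# async def main_streaming(message: cl.Message):
#     chain = cl.user_session.get("chain")
#     msg = cl.Message(content="")
#     async for token in chain.arun_pipeline(message.content):
#         await msg.stream_token(token)  # push each token to the UI as it arrives
#     await msg.send()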