# NOTE(review): removed Hugging Face Spaces page-scrape residue that preceded
# this file (status lines, commit hashes, and a line-number gutter) — it was
# not part of the Python source and made the file unparseable.
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
# OpenAI Chat completion
import os
from openai import AsyncOpenAI # importing openai for API usage
import chainlit as cl # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage # importing prompt tools
from chainlit.playground.providers import ChatOpenAI # importing ChatOpenAI tools
from dotenv import load_dotenv
from aimakerspace.text_utils import PDFFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase
load_dotenv()
# ChatOpenAI Templates
# Prompt templates used by `main` to assemble the chat messages.
system_template = """You are a Wizzard and everything you say is a spell!
"""
# `{input}` is filled with the user's message content.
user_template = """{input}
Wizzard, think through your response step by step.
"""
# `{context}` is filled with the retrieved vector-store snippets (may be empty).
# NOTE(review): `main` sends this template with role="assistant" — unusual;
# retrieved context is more commonly injected via a system or user message.
# Confirm this is intentional.
assistant_template = """Use the following context, if any, to help you
answer the user's input, if the answer is not in the context say you don't
know the answer.
CONTEXT:
===============
{context}
===============
Spell away Wizzard!
"""
@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    """Prompt the user for PDF uploads, build a vector store from their
    contents, and stash both the store and the OpenAI settings in the
    user session for `main` to use.
    """
    settings = {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    cl.user_session.set("settings", settings)

    # Keep asking until the user actually uploads something —
    # AskFileMessage returns None when the 60s timeout expires.
    files = None
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a PDF file to begin",
            accept=["application/pdf"],
            max_files=10,
            max_size_mb=10,
            timeout=60,
        ).send()

    # let the user know you are processing the file(s)
    await cl.Message(
        content="Loading your files..."
    ).send()

    # decode the file(s) into raw text documents
    documents = PDFFileLoader(path="", files=files).load_documents()

    # split the text into overlapping chunks for embedding
    chunks = CharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    ).split_texts(documents)

    # Bug fix: the original did `print(chunks[0])`, which raises IndexError
    # when no text could be extracted (e.g. a scanned/image-only PDF).
    # Tell the user instead of crashing the session.
    if not chunks:
        await cl.Message(
            content="No text could be extracted from the uploaded file(s). "
                    "Please try a different PDF."
        ).send()
        return

    # let the user know you are building the vector store
    await cl.Message(
        content="Creating vector store"
    ).send()
    vector_db = VectorDatabase()
    vector_db = await vector_db.abuild_from_list(chunks)

    await cl.Message(
        content="Done. Ask away!"
    ).send()
    cl.user_session.set("vector_db", vector_db)
@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: cl.Message):
    """Answer a user message via RAG: retrieve relevant chunks from the
    session's vector store, build a prompt around them, and stream the
    OpenAI completion back to the user token by token.
    """
    vector_db = cl.user_session.get("vector_db")
    settings = cl.user_session.get("settings")
    client = AsyncOpenAI()

    # Retrieve up to 3 chunks relevant to the user's question; join them
    # into one context string (empty when nothing is found).
    results_list = vector_db.search_by_text(
        query_text=message.content, k=3, return_as_text=True
    )
    results_string = "\n\n".join(results_list) if results_list else ""

    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message.content),
            ),
            # NOTE(review): context is injected with role="assistant" —
            # confirm this is intentional (system/user is more common).
            PromptMessage(
                role="assistant",
                template=assistant_template,
                formatted=assistant_template.format(context=results_string),
            ),
        ],
        inputs={
            "input": message.content,
            "context": results_string,
        },
        settings=settings,
    )

    msg = cl.Message(content="")

    # Call OpenAI and stream the completion into the Chainlit message.
    async for stream_resp in await client.chat.completions.create(
        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
    ):
        # Bug fix: the final streamed chunk can carry an empty `choices`
        # list; the original indexed it blindly and could raise IndexError.
        if not stream_resp.choices:
            continue
        token = stream_resp.choices[0].delta.content or ""
        await msg.stream_token(token)

    # Attach the completion to the prompt so the Chainlit playground can
    # display the full exchange.
    prompt.completion = msg.content
    msg.prompt = prompt

    # Send and close the message stream
    await msg.send()
|