import hashlib
import os
from typing import Dict, Optional

import chainlit as cl
from chainlit.input_widget import Select, Slider, Switch

# from chainlit.playground.config import add_llm_provider
# from chainlit.playground.providers.langchain import LangchainGenericProvider
# from chainlit import user_session
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings

# from langchain.llms import HuggingFaceHub
# from langchain.prompts.chat import (AIMessagePromptTemplate,
#                                     ChatPromptTemplate,
#                                     HumanMessagePromptTemplate)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

# Chunking configuration (used when the docs index is built offline; not
# referenced at serve time).
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

# Embed queries with OpenAI and search a prebuilt FAISS index of the book docs.
embeddings = OpenAIEmbeddings()
vector_store = FAISS.load_local("docs.faiss", embeddings)
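
# The "docs.faiss" index is assumed to be built ahead of time. A minimal
# sketch of such an indexing step (hypothetical source path and loader, not
# part of this app):
#
# from langchain.document_loaders import TextLoader
#
# docs = TextLoader("book.md").load()
# chunks = text_splitter.split_documents(docs)
# FAISS.from_documents(chunks, embeddings).save_local("docs.faiss")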


@cl.oauth_callback
def oauth_callback(
    provider_id: str,
    token: str,
    raw_user_data: Dict[str, str],
    default_app_user: cl.AppUser,
) -> Optional[cl.AppUser]:
    # Promote the known owner account to admin; everyone else is a regular user.
    match default_app_user.username:
        case "Broomva":
            default_app_user.tags = ["admin_user"]
            default_app_user.role = "ADMIN"
        case _:
            default_app_user.tags = ["regular_user"]
            default_app_user.role = "USER"
    return default_app_user
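
# OAuth providers are enabled through environment variables rather than code;
# e.g. for GitHub (variable names per Chainlit's docs, set outside this file):
#
#   OAUTH_GITHUB_CLIENT_ID=...
#   OAUTH_GITHUB_CLIENT_SECRET=...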


# Alternative: authenticate from a request header instead of OAuth/password.
# @cl.header_auth_callback
# def header_auth_callback(headers) -> Optional[cl.AppUser]:
#     # Verify the signature of a token in the header (e.g. a JWT),
#     # or check that the value matches a row in your database.
#     if (
#         headers.get("cookie")
#         == "ajs_user_id=5011e946-0d0d-5bd4-a293-65742db98d3d; ajs_anonymous_id=67d2569d-3f50-48f3-beaf-b756286276d9"
#     ):
#         return cl.AppUser(username="Broomva", role="ADMIN", provider="header")
#     else:
#         return None


@cl.password_auth_callback
def auth_callback(
    username: str = "guest", password: str = "guest"
) -> Optional[cl.AppUser]:
    # In a real deployment, fetch the user record from your database and
    # compare salted hashes; here the expected SHA-256 digest is hardcoded.
    hashed_password = hashlib.sha256(password.encode()).hexdigest()
    if (username, hashed_password) == (
        "broomva",
        "b68cacbadaee450b8a8ce2dd44842f1de03ee9993ad97b5e99dea64ef93960ba",
    ):
        return cl.AppUser(
            username="Broomva",
            role="OWNER",
            provider="credentials",
            tags=["admin_user"],
        )
    elif (username, password) == ("guest", "guest"):
        return cl.AppUser(username="Guest", role="USER", provider="credentials")
    else:
        return None
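
# The stored digest above can be reproduced from the plaintext password (not
# recorded here) with a one-liner like:
#
#   python -c "import hashlib; print(hashlib.sha256(b'<password>').hexdigest())"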


@cl.set_chat_profiles
async def chat_profile(current_user: cl.AppUser):
    # Non-admin users only get the default GPT-3.5 profile.
    if current_user.role not in ("ADMIN", "OWNER"):
        return [
            cl.ChatProfile(
                name="Broomva Book Agent",
                markdown_description="The underlying LLM model is **GPT-3.5**.",
            ),
        ]
    return [
        cl.ChatProfile(
            name="Turbo Agent",
            markdown_description="The underlying LLM model is **GPT-3.5**.",
        ),
        cl.ChatProfile(
            name="GPT4 Agent",
            markdown_description="The underlying LLM model is **GPT-4 Turbo**.",
        ),
    ]


@cl.on_settings_update
async def setup_agent(settings):
    print("on_settings_update", settings)


@cl.on_chat_start
async def init():
    settings = await cl.ChatSettings(
        [
            Select(
                id="model",
                label="OpenAI - Model",
                values=[
                    "gpt-3.5-turbo",
                    "gpt-3.5-turbo-1106",
                    "gpt-4",
                    "gpt-4-1106-preview",
                ],
                initial_index=0,
            ),
            Switch(id="streaming", label="OpenAI - Stream Tokens", initial=True),
            Slider(
                id="temperature",
                label="OpenAI - Temperature",
                initial=1,
                min=0,
                max=2,
                step=0.1,
            ),
            Slider(
                id="k",
                label="RAG - Retrieved Documents",
                initial=5,
                min=1,
                max=20,
                step=1,
            ),
        ]
    ).send()

    # The selected chat profile overrides the model chosen in the settings panel.
    chat_profile = cl.user_session.get("chat_profile")
    if chat_profile == "Turbo Agent":
        settings["model"] = "gpt-3.5-turbo"
    elif chat_profile == "GPT4 Agent":
        settings["model"] = "gpt-4-1106-preview"

    # Build a retrieval QA chain that "stuffs" the top-k retrieved chunks into
    # the prompt and answers with cited sources.
    chain = RetrievalQAWithSourcesChain.from_chain_type(
        ChatOpenAI(
            temperature=settings["temperature"],
            streaming=settings["streaming"],
            model=settings["model"],
        ),
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": int(settings["k"])}),
    )
    cl.user_session.set("settings", settings)
    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message):
    chain = cl.user_session.get("chain")  # type: RetrievalQAWithSourcesChain
    # Stream tokens as they arrive; answer_reached=True starts streaming
    # immediately instead of waiting for the "FINAL ANSWER" prefix.
    cb = cl.AsyncLangchainCallbackHandler(
        stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
    )
    cb.answer_reached = True
    res = await chain.acall(message.content, callbacks=[cb])
    if cb.has_streamed_final_answer:
        await cb.final_stream.update()
    else:
        await cl.Message(content=res["answer"]).send()
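
# To serve the app locally (assuming this file is saved as app.py and
# OPENAI_API_KEY is set in the environment):
#
#   chainlit run app.py -w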