Broomva committed on
Commit
d68aa74
·
1 Parent(s): 03c2c2d

Added auth options

Browse files
Files changed (2) hide show
  1. app.py +111 -52
  2. chainlit.md +3 -6
app.py CHANGED
@@ -3,10 +3,13 @@ from typing import Dict, Optional
3
 
4
  import chainlit as cl
5
  from chainlit.input_widget import Select, Slider, Switch
 
 
6
  # from chainlit import user_session
7
  from langchain.chains import RetrievalQAWithSourcesChain
8
  from langchain.chat_models import ChatOpenAI
9
  from langchain.embeddings.openai import OpenAIEmbeddings
 
10
  from langchain.prompts.chat import (AIMessagePromptTemplate,
11
  ChatPromptTemplate,
12
  HumanMessagePromptTemplate)
@@ -18,50 +21,82 @@ embeddings = OpenAIEmbeddings()
18
  vector_store = FAISS.load_local("docs.faiss", embeddings)
19
 
20
 
21
- # @cl.oauth_callback
22
- # def oauth_callback(
23
- # provider_id: str,
24
- # token: str,
25
- # raw_user_data: Dict[str, str],
26
- # default_app_user: cl.AppUser,
27
- # ) -> Optional[cl.AppUser]:
28
- # # set AppUser tags as regular_user
29
- # match default_app_user.username:
30
- # case "Broomva":
31
- # default_app_user.tags = ["admin_user"]
32
- # default_app_user.role = "ADMIN"
33
- # case _:
34
- # default_app_user.tags = ["regular_user"]
35
- # default_app_user.role = "USER"
36
- # print(default_app_user)
37
- # return default_app_user
38
-
39
-
40
- # @cl.set_chat_profiles
41
- # async def chat_profile(current_user: cl.AppUser):
42
- # if "admin_user" not in current_user.tags:
43
- # # Default to 3.5 when not admin
44
- # return [
45
- # cl.ChatProfile(
46
- # name="GPT-3.5",
47
- # markdown_description="The underlying LLM model is **GPT-3.5**.",
48
- # icon="https://picsum.photos/200",
49
- # )
50
- # ]
51
-
52
- # return [
53
- # cl.ChatProfile(
54
- # name="GPT-3.5",
55
- # markdown_description="The underlying LLM model is **GPT-3.5**.",
56
- # icon="https://picsum.photos/200",
57
- # ),
58
- # cl.ChatProfile(
59
- # name="GPT-4",
60
- # markdown_description="The underlying LLM model is **GPT-4**.",
61
- # icon="https://picsum.photos/250",
62
- # ),
63
- # ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
  @cl.on_settings_update
67
  async def setup_agent(settings):
@@ -90,7 +125,7 @@ async def init():
90
  Slider(
91
  id="k",
92
  label="RAG - Retrieved Documents",
93
- initial=3,
94
  min=1,
95
  max=20,
96
  step=1,
@@ -98,28 +133,32 @@ async def init():
98
  ]
99
  ).send()
100
 
 
101
 
102
- # print(settings)
103
- # app_user = cl.user_session.get("user")
104
- # chat_profile = cl.user_session.get("chat_profile")
105
- # await cl.Message(
106
- # content=f"🪼 Starting chat with {app_user.username} using the {chat_profile} chat profile"
107
- # ).send()
108
 
109
  chain = RetrievalQAWithSourcesChain.from_chain_type(
110
  ChatOpenAI(temperature=settings['temperature'], streaming=settings['streaming'], model=settings['model']),
111
  chain_type="stuff",
112
  retriever=vector_store.as_retriever(search_kwargs={"k": int(settings['k'])}),
 
113
  )
114
 
 
 
 
115
  cl.user_session.set("chain", chain)
116
 
117
 
118
  @cl.on_message
119
  async def main(message):
120
  chain = cl.user_session.get("chain") # type: RetrievalQAWithSourcesChain
 
121
  cb = cl.AsyncLangchainCallbackHandler(
122
- stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
123
  )
124
  cb.answer_reached = True
125
 
@@ -131,4 +170,24 @@ async def main(message):
131
  answer = res["answer"]
132
  await cl.Message(
133
  content=answer,
134
- ).send()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  import chainlit as cl
5
  from chainlit.input_widget import Select, Slider, Switch
6
+ from chainlit.playground.config import add_llm_provider
7
+ from chainlit.playground.providers.langchain import LangchainGenericProvider
8
  # from chainlit import user_session
9
  from langchain.chains import RetrievalQAWithSourcesChain
10
  from langchain.chat_models import ChatOpenAI
11
  from langchain.embeddings.openai import OpenAIEmbeddings
12
+ from langchain.llms import HuggingFaceHub
13
  from langchain.prompts.chat import (AIMessagePromptTemplate,
14
  ChatPromptTemplate,
15
  HumanMessagePromptTemplate)
 
21
  vector_store = FAISS.load_local("docs.faiss", embeddings)
22
 
23
 
24
@cl.oauth_callback
def oauth_callback(
    provider_id: str,
    token: str,
    raw_user_data: Dict[str, str],
    default_app_user: cl.AppUser,
) -> Optional[cl.AppUser]:
    """Post-OAuth hook: tag the signed-in user and assign a role.

    The "Broomva" account is promoted to ADMIN; every other account is
    tagged as a regular USER. Returning the (mutated) user accepts the
    login; the user object is echoed to stdout for debugging.
    """
    if default_app_user.username == "Broomva":
        default_app_user.tags = ["admin_user"]
        default_app_user.role = "ADMIN"
    else:
        default_app_user.tags = ["regular_user"]
        default_app_user.role = "USER"
    print(default_app_user)
    return default_app_user
41
+
42
@cl.header_auth_callback
def header_auth_callback(headers) -> Optional[cl.AppUser]:
    """Authenticate a request from its HTTP headers (cookie match).

    Grants the ADMIN user when the request carries the exact expected
    Cookie header; otherwise rejects the request by returning None.
    """
    import hmac

    # NOTE(review): matching the entire cookie string is brittle — any
    # additional cookie set by the browser breaks the match. A signed
    # token (e.g. a JWT) verified here would be more robust.
    expected = "ajs_user_id=5011e946-0d0d-5bd4-a293-65742db98d3d; ajs_anonymous_id=67d2569d-3f50-48f3-beaf-b756286276d9"

    cookie = headers.get("cookie")
    # Fixes from the original: do not print the raw header map (it
    # contains session cookies, i.e. credentials, which would leak into
    # logs), guard against a missing Cookie header, and compare the
    # secret in constant time to avoid a timing side channel.
    if cookie is not None and hmac.compare_digest(cookie, expected):
        return cl.AppUser(username="Broomva", role="ADMIN", provider="header")
    return None
51
+
52
+
53
@cl.password_auth_callback
def auth_callback(username: str = 'guest', password: str = 'guest') -> Optional[cl.AppUser]:
    """Username/password login.

    Recognised accounts:
      * "broomva"        — password checked against a stored SHA-256
                           digest; logs in as ADMIN.
      * "guest"/"guest"  — logs in as an unprivileged USER.

    Returns the authenticated AppUser, or None to reject the login.
    """
    import hashlib
    import hmac

    # NOTE(review): unsalted SHA-256 is a weak password store — prefer a
    # dedicated password KDF (bcrypt/scrypt/argon2) beyond a demo setup.
    hashed_password = hashlib.sha256(password.encode()).hexdigest()

    # Fix from the original: compare secrets with hmac.compare_digest
    # (constant time) instead of ==, which leaks information through
    # timing differences.
    if username == "broomva" and hmac.compare_digest(
        hashed_password,
        "b68cacbadaee450b8a8ce2dd44842f1de03ee9993ad97b5e99dea64ef93960ba",
    ):
        return cl.AppUser(username="Broomva", role="ADMIN", provider="credentials")
    if username == "guest" and hmac.compare_digest(password, "guest"):
        return cl.AppUser(username="Guest", role="USER", provider="credentials")
    return None
74
+
75
@cl.set_chat_profiles
async def chat_profile(current_user: cl.AppUser):
    """Expose the chat profiles available to the current user.

    Non-admin users only see the default GPT-3.5-backed agent; admins
    may also choose the GPT-4 Turbo variant.
    """
    if "ADMIN" not in current_user.role:
        # Default to 3.5 when not admin.
        available = [
            ("Broomva Book Agent", "The underlying LLM model is **GPT-3.5**."),
        ]
    else:
        available = [
            ("Broomva Book Agent Lite", "The underlying LLM model is **GPT-3.5**."),
            ("Broomva Book Agent Turbo", "The underlying LLM model is **GPT-4 Turbo**."),
        ]
    return [
        cl.ChatProfile(name=title, markdown_description=blurb)
        for title, blurb in available
    ]
99
+
100
 
101
  @cl.on_settings_update
102
  async def setup_agent(settings):
 
125
  Slider(
126
  id="k",
127
  label="RAG - Retrieved Documents",
128
+ initial=5,
129
  min=1,
130
  max=20,
131
  step=1,
 
133
  ]
134
  ).send()
135
 
136
+ chat_profile = cl.user_session.get("chat_profile")
137
 
138
+ if chat_profile == "Broomva Book Agent Lite":
139
+ settings['model'] = "gpt-3.5-turbo"
140
+ elif chat_profile == "Broomva Book Agent Turbo":
141
+ settings['model'] = "gpt-4-1106-preview"
 
 
142
 
143
  chain = RetrievalQAWithSourcesChain.from_chain_type(
144
  ChatOpenAI(temperature=settings['temperature'], streaming=settings['streaming'], model=settings['model']),
145
  chain_type="stuff",
146
  retriever=vector_store.as_retriever(search_kwargs={"k": int(settings['k'])}),
147
+
148
  )
149
 
150
+ cl.user_session.set("settings", settings)
151
+
152
+ print(settings)
153
  cl.user_session.set("chain", chain)
154
 
155
 
156
  @cl.on_message
157
  async def main(message):
158
  chain = cl.user_session.get("chain") # type: RetrievalQAWithSourcesChain
159
+
160
  cb = cl.AsyncLangchainCallbackHandler(
161
+ stream_final_answer=True, #answer_prefix_tokens=["FINAL", "ANSWER"]
162
  )
163
  cb.answer_reached = True
164
 
 
170
  answer = res["answer"]
171
  await cl.Message(
172
  content=answer,
173
+ ).send()
174
+
175
# Register a HuggingFace translation model as an extra playground provider.
llm = HuggingFaceHub(
    repo_id="Broomva/bart-large-translation-spa-guc",
    model_kwargs={"max_length": 500},
)

# NOTE(review): is_chat=True is set here, but HuggingFaceHub is a plain
# completion LLM rather than a chat model — confirm the flag is intended.
_translation_provider = LangchainGenericProvider(
    # The provider id must match the LLM's _llm_type so the playground
    # routes requests to this instance.
    id=llm._llm_type,
    # Display name shown in the playground UI.
    name="Spa - Guc Translation",
    # Must be a correctly configured Langchain LLM instance.
    llm=llm,
    is_chat=True,
)
add_llm_provider(_translation_provider)
chainlit.md CHANGED
@@ -1,11 +1,8 @@
1
  # Welcome to the Broomva Book Chat
2
 
3
  This is the Q&A agent for the Broomva Book. It is a chat interface that uses retrieval to
4
- answer questions leveraging information found in the book.
5
 
6
- Go ahead and ask things like:
7
-
8
- `What is the book about?`
9
  `What is machine learning and deep learning?`
10
- `what is quantum computing?`
11
- `can you explain the meaning of life?`
 
1
  # Welcome to the Broomva Book Chat
2
 
3
  This is the Q&A agent for the Broomva Book. It is a chat interface that uses retrieval to
4
+ answer questions leveraging information found in the book. Go ahead and ask things like:
5
 
 
 
 
6
  `What is machine learning and deep learning?`
7
+
8
+ `What is quantum computing?`