XThomasBU commited on
Commit
33adc14
1 Parent(s): 91c4a28

initial working commit

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +13 -0
  2. README.md +6 -0
  3. apps/ai_tutor/.chainlit/config.toml +120 -0
  4. apps/ai_tutor/.chainlit/translations/en-US.json +229 -0
  5. apps/ai_tutor/.chainlit/translations/zh-CN.json +229 -0
  6. apps/ai_tutor/README.md +12 -0
  7. apps/ai_tutor/__pycache__/app.cpython-311.pyc +0 -0
  8. apps/ai_tutor/__pycache__/chainlit_app.cpython-311.pyc +0 -0
  9. apps/ai_tutor/__pycache__/helpers.cpython-311.pyc +0 -0
  10. apps/ai_tutor/app.py +390 -0
  11. apps/ai_tutor/chainlit_app.py +563 -0
  12. apps/ai_tutor/config/__pycache__/config_manager.cpython-311.pyc +0 -0
  13. apps/ai_tutor/config/__pycache__/constants.cpython-311.pyc +0 -0
  14. apps/ai_tutor/config/__pycache__/prompts.cpython-311.pyc +0 -0
  15. apps/ai_tutor/config/config.yml +60 -0
  16. apps/ai_tutor/config/config_manager.py +189 -0
  17. apps/ai_tutor/config/constants.py +26 -0
  18. apps/ai_tutor/config/project_config.yml +20 -0
  19. apps/ai_tutor/config/prompts.py +97 -0
  20. apps/ai_tutor/encrypt_students.py +53 -0
  21. apps/ai_tutor/helpers.py +90 -0
  22. apps/ai_tutor/private/placeholder_students_file.json +5 -0
  23. apps/ai_tutor/public/assets/images/avatars/ai-tutor.png +0 -0
  24. apps/ai_tutor/public/assets/images/avatars/ai_tutor.png +0 -0
  25. apps/ai_tutor/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg +2 -0
  26. apps/ai_tutor/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg +2 -0
  27. apps/ai_tutor/public/assets/images/starter_icons/alarmy-svgrepo-com.svg +2 -0
  28. apps/ai_tutor/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg +36 -0
  29. apps/ai_tutor/public/files/students_encrypted.json +1 -0
  30. apps/ai_tutor/public/files/test.css +32 -0
  31. apps/ai_tutor/public/logo_dark.png +0 -0
  32. apps/ai_tutor/public/logo_light.png +0 -0
  33. apps/ai_tutor/storage/data/urls.txt +1 -0
  34. apps/ai_tutor/templates/cooldown.html +181 -0
  35. apps/ai_tutor/templates/dashboard.html +145 -0
  36. apps/ai_tutor/templates/error.html +95 -0
  37. apps/ai_tutor/templates/error_404.html +80 -0
  38. apps/ai_tutor/templates/login.html +132 -0
  39. apps/ai_tutor/templates/logout.html +21 -0
  40. apps/ai_tutor/templates/unauthorized.html +94 -0
  41. apps/chainlit_base/.chainlit/config.toml +120 -0
  42. apps/chainlit_base/chainlit.md +14 -0
  43. apps/chainlit_base/chainlit_base.py +381 -0
  44. apps/chainlit_base/config/config.yml +60 -0
  45. apps/chainlit_base/config/config_manager.py +174 -0
  46. apps/chainlit_base/config/project_config.yml +10 -0
  47. apps/chainlit_base/config/prompts.py +97 -0
  48. apps/chainlit_base/public/assets/images/avatars/ai-tutor.png +0 -0
  49. apps/chainlit_base/public/assets/images/avatars/ai_tutor.png +0 -0
  50. apps/chainlit_base/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg +2 -0
.gitignore ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ **/vectorstores/*
2
+ **/private/students.json
3
+ .ragatouille/*
4
+ */__pycache__/*
5
+ .chainlit/translations/
6
+ code/.chainlit/translations/
7
+ storage/logs/*
8
+ vectorstores/*
9
+ **/apps/*/storage/logs/*
10
+ **/apps/*/private/*
11
+ *.log
12
+ **/.files/*
13
+ .env
README.md CHANGED
@@ -1 +1,7 @@
1
  ## App Templates
 
 
 
 
 
 
 
1
  ## App Templates
2
+
3
+ ```bash
4
+ pip install edubotics-core
5
+ ```
6
+
7
+ P.S. - Do remmber to setup your .env file
apps/ai_tutor/.chainlit/config.toml ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+
6
+ # List of environment variables to be provided by each user to use the app.
7
+ user_env = []
8
+
9
+ # Duration (in seconds) during which the session is saved when the connection is lost
10
+ session_timeout = 3600
11
+
12
+ # Enable third parties caching (e.g LangChain cache)
13
+ cache = false
14
+
15
+ # Authorized origins
16
+ allow_origins = ["*"]
17
+
18
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
19
+ # follow_symlink = false
20
+
21
+ [features]
22
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
23
+ unsafe_allow_html = true
24
+
25
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
26
+ latex = true
27
+
28
+ # Automatically tag threads with the current chat profile (if a chat profile is used)
29
+ auto_tag_thread = true
30
+
31
+ # Authorize users to spontaneously upload files with messages
32
+ [features.spontaneous_file_upload]
33
+ enabled = true
34
+ accept = ["*/*"]
35
+ max_files = 20
36
+ max_size_mb = 500
37
+
38
+ [features.audio]
39
+ # Threshold for audio recording
40
+ min_decibels = -45
41
+ # Delay for the user to start speaking in MS
42
+ initial_silence_timeout = 3000
43
+ # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
44
+ silence_timeout = 1500
45
+ # Above this duration (MS), the recording will forcefully stop.
46
+ max_duration = 15000
47
+ # Duration of the audio chunks in MS
48
+ chunk_duration = 1000
49
+ # Sample rate of the audio
50
+ sample_rate = 44100
51
+
52
+ edit_message = true
53
+
54
+ [UI]
55
+ # Name of the assistant.
56
+ name = "AI Tutor"
57
+
58
+ # Description of the assistant. This is used for HTML tags.
59
+ # description = ""
60
+
61
+ # Large size content are by default collapsed for a cleaner ui
62
+ default_collapse_content = true
63
+
64
+ # Chain of Thought (CoT) display mode. Can be "hidden", "tool_call" or "full".
65
+ cot = "hidden"
66
+
67
+ # Link to your github repo. This will add a github button in the UI's header.
68
+ github = "https://github.com/edubotics-ai/edubot-core"
69
+
70
+ # Specify a CSS file that can be used to customize the user interface.
71
+ # The CSS file can be served from the public directory or via an external link.
72
+ custom_css = "/public/files/test.css"
73
+
74
+ # Specify a Javascript file that can be used to customize the user interface.
75
+ # The Javascript file can be served from the public directory.
76
+ # custom_js = "/public/test.js"
77
+
78
+ # Specify a custom font url.
79
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
80
+
81
+ # Specify a custom meta image url.
82
+ custom_meta_image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/f/f5/Boston_University_seal.svg/1200px-Boston_University_seal.svg.png"
83
+
84
+ # Specify a custom build directory for the frontend.
85
+ # This can be used to customize the frontend code.
86
+ # Be careful: If this is a relative path, it should not start with a slash.
87
+ # custom_build = "./public/build"
88
+
89
+ [UI.theme]
90
+ default = "light"
91
+ #layout = "wide"
92
+ #font_family = "Inter, sans-serif"
93
+ # Override default MUI light theme. (Check theme.ts)
94
+ [UI.theme.light]
95
+ #background = "#FAFAFA"
96
+ #paper = "#FFFFFF"
97
+
98
+ [UI.theme.light.primary]
99
+ #main = "#F80061"
100
+ #dark = "#980039"
101
+ #light = "#FFE7EB"
102
+ [UI.theme.light.text]
103
+ #primary = "#212121"
104
+ #secondary = "#616161"
105
+
106
+ # Override default MUI dark theme. (Check theme.ts)
107
+ [UI.theme.dark]
108
+ #background = "#FAFAFA"
109
+ #paper = "#FFFFFF"
110
+
111
+ [UI.theme.dark.primary]
112
+ #main = "#F80061"
113
+ #dark = "#980039"
114
+ #light = "#FFE7EB"
115
+ [UI.theme.dark.text]
116
+ #primary = "#EEEEEE"
117
+ #secondary = "#BDBDBD"
118
+
119
+ [meta]
120
+ generated_by = "1.1.402"
apps/ai_tutor/.chainlit/translations/en-US.json ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "components": {
3
+ "atoms": {
4
+ "buttons": {
5
+ "userButton": {
6
+ "menu": {
7
+ "settings": "Settings",
8
+ "settingsKey": "S",
9
+ "APIKeys": "API Keys",
10
+ "logout": "Logout"
11
+ }
12
+ }
13
+ }
14
+ },
15
+ "molecules": {
16
+ "newChatButton": {
17
+ "newChat": "New Chat"
18
+ },
19
+ "tasklist": {
20
+ "TaskList": {
21
+ "title": "\ud83d\uddd2\ufe0f Task List",
22
+ "loading": "Loading...",
23
+ "error": "An error occurred"
24
+ }
25
+ },
26
+ "attachments": {
27
+ "cancelUpload": "Cancel upload",
28
+ "removeAttachment": "Remove attachment"
29
+ },
30
+ "newChatDialog": {
31
+ "createNewChat": "Create new chat?",
32
+ "clearChat": "This will clear the current messages and start a new chat.",
33
+ "cancel": "Cancel",
34
+ "confirm": "Confirm"
35
+ },
36
+ "settingsModal": {
37
+ "settings": "Settings",
38
+ "expandMessages": "Expand Messages",
39
+ "hideChainOfThought": "Hide Chain of Thought",
40
+ "darkMode": "Dark Mode"
41
+ },
42
+ "detailsButton": {
43
+ "using": "Using",
44
+ "used": "Used"
45
+ },
46
+ "auth": {
47
+ "authLogin": {
48
+ "title": "Login to access the app.",
49
+ "form": {
50
+ "email": "Email address",
51
+ "password": "Password",
52
+ "noAccount": "Don't have an account?",
53
+ "alreadyHaveAccount": "Already have an account?",
54
+ "signup": "Sign Up",
55
+ "signin": "Sign In",
56
+ "or": "OR",
57
+ "continue": "Continue",
58
+ "forgotPassword": "Forgot password?",
59
+ "passwordMustContain": "Your password must contain:",
60
+ "emailRequired": "email is a required field",
61
+ "passwordRequired": "password is a required field"
62
+ },
63
+ "error": {
64
+ "default": "Unable to sign in.",
65
+ "signin": "Try signing in with a different account.",
66
+ "oauthsignin": "Try signing in with a different account.",
67
+ "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
68
+ "oauthcallbackerror": "Try signing in with a different account.",
69
+ "oauthcreateaccount": "Try signing in with a different account.",
70
+ "emailcreateaccount": "Try signing in with a different account.",
71
+ "callback": "Try signing in with a different account.",
72
+ "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
73
+ "emailsignin": "The e-mail could not be sent.",
74
+ "emailverify": "Please verify your email, a new email has been sent.",
75
+ "credentialssignin": "Sign in failed. Check the details you provided are correct.",
76
+ "sessionrequired": "Please sign in to access this page."
77
+ }
78
+ },
79
+ "authVerifyEmail": {
80
+ "almostThere": "You're almost there! We've sent an email to ",
81
+ "verifyEmailLink": "Please click on the link in that email to complete your signup.",
82
+ "didNotReceive": "Can't find the email?",
83
+ "resendEmail": "Resend email",
84
+ "goBack": "Go Back",
85
+ "emailSent": "Email sent successfully.",
86
+ "verifyEmail": "Verify your email address"
87
+ },
88
+ "providerButton": {
89
+ "continue": "Continue with {{provider}}",
90
+ "signup": "Sign up with {{provider}}"
91
+ },
92
+ "authResetPassword": {
93
+ "newPasswordRequired": "New password is a required field",
94
+ "passwordsMustMatch": "Passwords must match",
95
+ "confirmPasswordRequired": "Confirm password is a required field",
96
+ "newPassword": "New password",
97
+ "confirmPassword": "Confirm password",
98
+ "resetPassword": "Reset Password"
99
+ },
100
+ "authForgotPassword": {
101
+ "email": "Email address",
102
+ "emailRequired": "email is a required field",
103
+ "emailSent": "Please check the email address {{email}} for instructions to reset your password.",
104
+ "enterEmail": "Enter your email address and we will send you instructions to reset your password.",
105
+ "resendEmail": "Resend email",
106
+ "continue": "Continue",
107
+ "goBack": "Go Back"
108
+ }
109
+ }
110
+ },
111
+ "organisms": {
112
+ "chat": {
113
+ "history": {
114
+ "index": {
115
+ "showHistory": "Show history",
116
+ "lastInputs": "Last Inputs",
117
+ "noInputs": "Such empty...",
118
+ "loading": "Loading..."
119
+ }
120
+ },
121
+ "inputBox": {
122
+ "input": {
123
+ "placeholder": "Type your message here..."
124
+ },
125
+ "speechButton": {
126
+ "start": "Start recording",
127
+ "stop": "Stop recording"
128
+ },
129
+ "SubmitButton": {
130
+ "sendMessage": "Send message",
131
+ "stopTask": "Stop Task"
132
+ },
133
+ "UploadButton": {
134
+ "attachFiles": "Attach files"
135
+ },
136
+ "waterMark": {
137
+ "text": "Built with"
138
+ }
139
+ },
140
+ "Messages": {
141
+ "index": {
142
+ "running": "Running",
143
+ "executedSuccessfully": "executed successfully",
144
+ "failed": "failed",
145
+ "feedbackUpdated": "Feedback updated",
146
+ "updating": "Updating"
147
+ }
148
+ },
149
+ "dropScreen": {
150
+ "dropYourFilesHere": "Drop your files here"
151
+ },
152
+ "index": {
153
+ "failedToUpload": "Failed to upload",
154
+ "cancelledUploadOf": "Cancelled upload of",
155
+ "couldNotReachServer": "Could not reach the server",
156
+ "continuingChat": "Continuing previous chat"
157
+ },
158
+ "settings": {
159
+ "settingsPanel": "Settings panel",
160
+ "reset": "Reset",
161
+ "cancel": "Cancel",
162
+ "confirm": "Confirm"
163
+ }
164
+ },
165
+ "threadHistory": {
166
+ "sidebar": {
167
+ "filters": {
168
+ "FeedbackSelect": {
169
+ "feedbackAll": "Feedback: All",
170
+ "feedbackPositive": "Feedback: Positive",
171
+ "feedbackNegative": "Feedback: Negative"
172
+ },
173
+ "SearchBar": {
174
+ "search": "Search"
175
+ }
176
+ },
177
+ "DeleteThreadButton": {
178
+ "confirmMessage": "This will delete the thread as well as it's messages and elements.",
179
+ "cancel": "Cancel",
180
+ "confirm": "Confirm",
181
+ "deletingChat": "Deleting chat",
182
+ "chatDeleted": "Chat deleted"
183
+ },
184
+ "index": {
185
+ "pastChats": "Past Chats"
186
+ },
187
+ "ThreadList": {
188
+ "empty": "Empty...",
189
+ "today": "Today",
190
+ "yesterday": "Yesterday",
191
+ "previous7days": "Previous 7 days",
192
+ "previous30days": "Previous 30 days"
193
+ },
194
+ "TriggerButton": {
195
+ "closeSidebar": "Close sidebar",
196
+ "openSidebar": "Open sidebar"
197
+ }
198
+ },
199
+ "Thread": {
200
+ "backToChat": "Go back to chat",
201
+ "chatCreatedOn": "This chat was created on"
202
+ }
203
+ },
204
+ "header": {
205
+ "chat": "Chat",
206
+ "readme": "Readme"
207
+ }
208
+ }
209
+ },
210
+ "hooks": {
211
+ "useLLMProviders": {
212
+ "failedToFetchProviders": "Failed to fetch providers:"
213
+ }
214
+ },
215
+ "pages": {
216
+ "Design": {},
217
+ "Env": {
218
+ "savedSuccessfully": "Saved successfully",
219
+ "requiredApiKeys": "Required API Keys",
220
+ "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
221
+ },
222
+ "Page": {
223
+ "notPartOfProject": "You are not part of this project."
224
+ },
225
+ "ResumeButton": {
226
+ "resumeChat": "Resume Chat"
227
+ }
228
+ }
229
+ }
apps/ai_tutor/.chainlit/translations/zh-CN.json ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "components": {
3
+ "atoms": {
4
+ "buttons": {
5
+ "userButton": {
6
+ "menu": {
7
+ "settings": "\u8bbe\u7f6e",
8
+ "settingsKey": "S",
9
+ "APIKeys": "API \u5bc6\u94a5",
10
+ "logout": "\u767b\u51fa"
11
+ }
12
+ }
13
+ }
14
+ },
15
+ "molecules": {
16
+ "newChatButton": {
17
+ "newChat": "\u65b0\u5efa\u5bf9\u8bdd"
18
+ },
19
+ "tasklist": {
20
+ "TaskList": {
21
+ "title": "\ud83d\uddd2\ufe0f \u4efb\u52a1\u5217\u8868",
22
+ "loading": "\u52a0\u8f7d\u4e2d...",
23
+ "error": "\u53d1\u751f\u9519\u8bef"
24
+ }
25
+ },
26
+ "attachments": {
27
+ "cancelUpload": "\u53d6\u6d88\u4e0a\u4f20",
28
+ "removeAttachment": "\u79fb\u9664\u9644\u4ef6"
29
+ },
30
+ "newChatDialog": {
31
+ "createNewChat": "\u521b\u5efa\u65b0\u5bf9\u8bdd\uff1f",
32
+ "clearChat": "\u8fd9\u5c06\u6e05\u9664\u5f53\u524d\u6d88\u606f\u5e76\u5f00\u59cb\u65b0\u7684\u5bf9\u8bdd\u3002",
33
+ "cancel": "\u53d6\u6d88",
34
+ "confirm": "\u786e\u8ba4"
35
+ },
36
+ "settingsModal": {
37
+ "settings": "\u8bbe\u7f6e",
38
+ "expandMessages": "\u5c55\u5f00\u6d88\u606f",
39
+ "hideChainOfThought": "\u9690\u85cf\u601d\u8003\u94fe",
40
+ "darkMode": "\u6697\u8272\u6a21\u5f0f"
41
+ },
42
+ "detailsButton": {
43
+ "using": "\u4f7f\u7528",
44
+ "used": "\u5df2\u7528"
45
+ },
46
+ "auth": {
47
+ "authLogin": {
48
+ "title": "\u767b\u5f55\u4ee5\u8bbf\u95ee\u5e94\u7528\u3002",
49
+ "form": {
50
+ "email": "\u7535\u5b50\u90ae\u7bb1\u5730\u5740",
51
+ "password": "\u5bc6\u7801",
52
+ "noAccount": "\u6ca1\u6709\u8d26\u6237\uff1f",
53
+ "alreadyHaveAccount": "\u5df2\u6709\u8d26\u6237\uff1f",
54
+ "signup": "\u6ce8\u518c",
55
+ "signin": "\u767b\u5f55",
56
+ "or": "\u6216\u8005",
57
+ "continue": "\u7ee7\u7eed",
58
+ "forgotPassword": "\u5fd8\u8bb0\u5bc6\u7801\uff1f",
59
+ "passwordMustContain": "\u60a8\u7684\u5bc6\u7801\u5fc5\u987b\u5305\u542b\uff1a",
60
+ "emailRequired": "\u7535\u5b50\u90ae\u7bb1\u662f\u5fc5\u586b\u9879",
61
+ "passwordRequired": "\u5bc6\u7801\u662f\u5fc5\u586b\u9879"
62
+ },
63
+ "error": {
64
+ "default": "\u65e0\u6cd5\u767b\u5f55\u3002",
65
+ "signin": "\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u8d26\u6237\u767b\u5f55\u3002",
66
+ "oauthsignin": "\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u8d26\u6237\u767b\u5f55\u3002",
67
+ "redirect_uri_mismatch": "\u91cd\u5b9a\u5411URI\u4e0eOAuth\u5e94\u7528\u914d\u7f6e\u4e0d\u5339\u914d\u3002",
68
+ "oauthcallbackerror": "\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u8d26\u6237\u767b\u5f55\u3002",
69
+ "oauthcreateaccount": "\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u8d26\u6237\u767b\u5f55\u3002",
70
+ "emailcreateaccount": "\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u8d26\u6237\u767b\u5f55\u3002",
71
+ "callback": "\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u8d26\u6237\u767b\u5f55\u3002",
72
+ "oauthaccountnotlinked": "\u4e3a\u4e86\u9a8c\u8bc1\u60a8\u7684\u8eab\u4efd\uff0c\u8bf7\u4f7f\u7528\u6700\u521d\u4f7f\u7528\u7684\u540c\u4e00\u8d26\u6237\u767b\u5f55\u3002",
73
+ "emailsignin": "\u65e0\u6cd5\u53d1\u9001\u90ae\u4ef6\u3002",
74
+ "emailverify": "\u8bf7\u9a8c\u8bc1\u60a8\u7684\u7535\u5b50\u90ae\u4ef6\uff0c\u5df2\u53d1\u9001\u4e00\u5c01\u65b0\u90ae\u4ef6\u3002",
75
+ "credentialssignin": "\u767b\u5f55\u5931\u8d25\u3002\u8bf7\u68c0\u67e5\u60a8\u63d0\u4f9b\u7684\u8be6\u7ec6\u4fe1\u606f\u662f\u5426\u6b63\u786e\u3002",
76
+ "sessionrequired": "\u8bf7\u767b\u5f55\u4ee5\u8bbf\u95ee\u6b64\u9875\u9762\u3002"
77
+ }
78
+ },
79
+ "authVerifyEmail": {
80
+ "almostThere": "\u60a8\u5feb\u6210\u529f\u4e86\uff01\u6211\u4eec\u5df2\u5411 ",
81
+ "verifyEmailLink": "\u8bf7\u5355\u51fb\u8be5\u90ae\u4ef6\u4e2d\u7684\u94fe\u63a5\u4ee5\u5b8c\u6210\u6ce8\u518c\u3002",
82
+ "didNotReceive": "\u6ca1\u627e\u5230\u90ae\u4ef6\uff1f",
83
+ "resendEmail": "\u91cd\u65b0\u53d1\u9001\u90ae\u4ef6",
84
+ "goBack": "\u8fd4\u56de",
85
+ "emailSent": "\u90ae\u4ef6\u5df2\u6210\u529f\u53d1\u9001\u3002",
86
+ "verifyEmail": "\u9a8c\u8bc1\u60a8\u7684\u7535\u5b50\u90ae\u4ef6\u5730\u5740"
87
+ },
88
+ "providerButton": {
89
+ "continue": "\u4f7f\u7528{{provider}}\u7ee7\u7eed",
90
+ "signup": "\u4f7f\u7528{{provider}}\u6ce8\u518c"
91
+ },
92
+ "authResetPassword": {
93
+ "newPasswordRequired": "\u65b0\u5bc6\u7801\u662f\u5fc5\u586b\u9879",
94
+ "passwordsMustMatch": "\u5bc6\u7801\u5fc5\u987b\u4e00\u81f4",
95
+ "confirmPasswordRequired": "\u786e\u8ba4\u5bc6\u7801\u662f\u5fc5\u586b\u9879",
96
+ "newPassword": "\u65b0\u5bc6\u7801",
97
+ "confirmPassword": "\u786e\u8ba4\u5bc6\u7801",
98
+ "resetPassword": "\u91cd\u7f6e\u5bc6\u7801"
99
+ },
100
+ "authForgotPassword": {
101
+ "email": "\u7535\u5b50\u90ae\u7bb1\u5730\u5740",
102
+ "emailRequired": "\u7535\u5b50\u90ae\u7bb1\u662f\u5fc5\u586b\u9879",
103
+ "emailSent": "\u8bf7\u68c0\u67e5\u7535\u5b50\u90ae\u7bb1{{email}}\u4ee5\u83b7\u53d6\u91cd\u7f6e\u5bc6\u7801\u7684\u6307\u793a\u3002",
104
+ "enterEmail": "\u8bf7\u8f93\u5165\u60a8\u7684\u7535\u5b50\u90ae\u7bb1\u5730\u5740\uff0c\u6211\u4eec\u5c06\u53d1\u9001\u91cd\u7f6e\u5bc6\u7801\u7684\u6307\u793a\u3002",
105
+ "resendEmail": "\u91cd\u65b0\u53d1\u9001\u90ae\u4ef6",
106
+ "continue": "\u7ee7\u7eed",
107
+ "goBack": "\u8fd4\u56de"
108
+ }
109
+ }
110
+ },
111
+ "organisms": {
112
+ "chat": {
113
+ "history": {
114
+ "index": {
115
+ "showHistory": "\u663e\u793a\u5386\u53f2",
116
+ "lastInputs": "\u6700\u540e\u8f93\u5165",
117
+ "noInputs": "\u5982\u6b64\u7a7a\u65f7...",
118
+ "loading": "\u52a0\u8f7d\u4e2d..."
119
+ }
120
+ },
121
+ "inputBox": {
122
+ "input": {
123
+ "placeholder": "\u5728\u8fd9\u91cc\u8f93\u5165\u60a8\u7684\u6d88\u606f..."
124
+ },
125
+ "speechButton": {
126
+ "start": "\u5f00\u59cb\u5f55\u97f3",
127
+ "stop": "\u505c\u6b62\u5f55\u97f3"
128
+ },
129
+ "SubmitButton": {
130
+ "sendMessage": "\u53d1\u9001\u6d88\u606f",
131
+ "stopTask": "\u505c\u6b62\u4efb\u52a1"
132
+ },
133
+ "UploadButton": {
134
+ "attachFiles": "\u9644\u52a0\u6587\u4ef6"
135
+ },
136
+ "waterMark": {
137
+ "text": "\u4f7f\u7528"
138
+ }
139
+ },
140
+ "Messages": {
141
+ "index": {
142
+ "running": "\u8fd0\u884c\u4e2d",
143
+ "executedSuccessfully": "\u6267\u884c\u6210\u529f",
144
+ "failed": "\u5931\u8d25",
145
+ "feedbackUpdated": "\u53cd\u9988\u66f4\u65b0",
146
+ "updating": "\u6b63\u5728\u66f4\u65b0"
147
+ }
148
+ },
149
+ "dropScreen": {
150
+ "dropYourFilesHere": "\u5728\u8fd9\u91cc\u62d6\u653e\u60a8\u7684\u6587\u4ef6"
151
+ },
152
+ "index": {
153
+ "failedToUpload": "\u4e0a\u4f20\u5931\u8d25",
154
+ "cancelledUploadOf": "\u53d6\u6d88\u4e0a\u4f20",
155
+ "couldNotReachServer": "\u65e0\u6cd5\u8fde\u63a5\u5230\u670d\u52a1\u5668",
156
+ "continuingChat": "\u7ee7\u7eed\u4e4b\u524d\u7684\u5bf9\u8bdd"
157
+ },
158
+ "settings": {
159
+ "settingsPanel": "\u8bbe\u7f6e\u9762\u677f",
160
+ "reset": "\u91cd\u7f6e",
161
+ "cancel": "\u53d6\u6d88",
162
+ "confirm": "\u786e\u8ba4"
163
+ }
164
+ },
165
+ "threadHistory": {
166
+ "sidebar": {
167
+ "filters": {
168
+ "FeedbackSelect": {
169
+ "feedbackAll": "\u53cd\u9988\uff1a\u5168\u90e8",
170
+ "feedbackPositive": "\u53cd\u9988\uff1a\u6b63\u9762",
171
+ "feedbackNegative": "\u53cd\u9988\uff1a\u8d1f\u9762"
172
+ },
173
+ "SearchBar": {
174
+ "search": "\u641c\u7d22"
175
+ }
176
+ },
177
+ "DeleteThreadButton": {
178
+ "confirmMessage": "\u8fd9\u5c06\u5220\u9664\u7ebf\u7a0b\u53ca\u5176\u6d88\u606f\u548c\u5143\u7d20\u3002",
179
+ "cancel": "\u53d6\u6d88",
180
+ "confirm": "\u786e\u8ba4",
181
+ "deletingChat": "\u5220\u9664\u5bf9\u8bdd",
182
+ "chatDeleted": "\u5bf9\u8bdd\u5df2\u5220\u9664"
183
+ },
184
+ "index": {
185
+ "pastChats": "\u8fc7\u5f80\u5bf9\u8bdd"
186
+ },
187
+ "ThreadList": {
188
+ "empty": "\u7a7a\u7684...",
189
+ "today": "\u4eca\u5929",
190
+ "yesterday": "\u6628\u5929",
191
+ "previous7days": "\u524d7\u5929",
192
+ "previous30days": "\u524d30\u5929"
193
+ },
194
+ "TriggerButton": {
195
+ "closeSidebar": "\u5173\u95ed\u4fa7\u8fb9\u680f",
196
+ "openSidebar": "\u6253\u5f00\u4fa7\u8fb9\u680f"
197
+ }
198
+ },
199
+ "Thread": {
200
+ "backToChat": "\u8fd4\u56de\u5bf9\u8bdd",
201
+ "chatCreatedOn": "\u6b64\u5bf9\u8bdd\u521b\u5efa\u4e8e"
202
+ }
203
+ },
204
+ "header": {
205
+ "chat": "\u5bf9\u8bdd",
206
+ "readme": "\u8bf4\u660e"
207
+ }
208
+ }
209
+ },
210
+ "hooks": {
211
+ "useLLMProviders": {
212
+ "failedToFetchProviders": "\u83b7\u53d6\u63d0\u4f9b\u8005\u5931\u8d25:"
213
+ }
214
+ },
215
+ "pages": {
216
+ "Design": {},
217
+ "Env": {
218
+ "savedSuccessfully": "\u4fdd\u5b58\u6210\u529f",
219
+ "requiredApiKeys": "\u5fc5\u9700\u7684API\u5bc6\u94a5",
220
+ "requiredApiKeysInfo": "\u8981\u4f7f\u7528\u6b64\u5e94\u7528\uff0c\u9700\u8981\u4ee5\u4e0bAPI\u5bc6\u94a5\u3002\u8fd9\u4e9b\u5bc6\u94a5\u5b58\u50a8\u5728\u60a8\u7684\u8bbe\u5907\u672c\u5730\u5b58\u50a8\u4e2d\u3002"
221
+ },
222
+ "Page": {
223
+ "notPartOfProject": "\u60a8\u4e0d\u662f\u6b64\u9879\u76ee\u7684\u4e00\u90e8\u5206\u3002"
224
+ },
225
+ "ResumeButton": {
226
+ "resumeChat": "\u6062\u590d\u5bf9\u8bdd"
227
+ }
228
+ }
229
+ }
apps/ai_tutor/README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # WIP
2
+
3
+
4
+ ## Run the encrypt_students script
5
+
6
+ - If you don't want the emails to be public, run this script to encrypt the emails of the students.
7
+ - This will create a new file in the public/files/ directory.
8
+ - Place your file with the students' emails in the private/ directory (do not commit this file to the repository).
9
+
10
+ ```bash
11
+ python encrypt_students.py --students-file private/students.json --encrypted-students-file public/files/students_encrypted.json
12
+ ```
apps/ai_tutor/__pycache__/app.cpython-311.pyc ADDED
Binary file (17.9 kB). View file
 
apps/ai_tutor/__pycache__/chainlit_app.cpython-311.pyc ADDED
Binary file (25.3 kB). View file
 
apps/ai_tutor/__pycache__/helpers.cpython-311.pyc ADDED
Binary file (3.96 kB). View file
 
apps/ai_tutor/app.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request, Response, HTTPException
2
+ from fastapi.responses import HTMLResponse, RedirectResponse
3
+ from fastapi.templating import Jinja2Templates
4
+ from google.oauth2 import id_token
5
+ from google.auth.transport import requests as google_requests
6
+ from google_auth_oauthlib.flow import Flow
7
+ from chainlit.utils import mount_chainlit
8
+ import secrets
9
+ import json
10
+ import base64
11
+ from config.constants import (
12
+ OAUTH_GOOGLE_CLIENT_ID,
13
+ OAUTH_GOOGLE_CLIENT_SECRET,
14
+ CHAINLIT_URL,
15
+ EMAIL_ENCRYPTION_KEY,
16
+ )
17
+ from fastapi.middleware.cors import CORSMiddleware
18
+ from fastapi.staticfiles import StaticFiles
19
+ from helpers import (
20
+ get_time,
21
+ reset_tokens_for_user,
22
+ check_user_cooldown,
23
+ )
24
+ from edubotics_core.chat_processor.helpers import get_user_details, update_user_info
25
+ from config.config_manager import config_manager
26
+ import hashlib
27
+
28
+ # set config
29
+ config = config_manager.get_config().dict()
30
+
31
+ # set constants
32
+ GITHUB_REPO = config["misc"]["github_repo"]
33
+ DOCS_WEBSITE = config["misc"]["docs_website"]
34
+ ALL_TIME_TOKENS_ALLOCATED = config["token_config"]["all_time_tokens_allocated"]
35
+ TOKENS_LEFT = config["token_config"]["tokens_left"]
36
+ COOLDOWN_TIME = config["token_config"]["cooldown_time"]
37
+ REGEN_TIME = config["token_config"]["regen_time"]
38
+
39
+ GOOGLE_CLIENT_ID = OAUTH_GOOGLE_CLIENT_ID
40
+ GOOGLE_CLIENT_SECRET = OAUTH_GOOGLE_CLIENT_SECRET
41
+ GOOGLE_REDIRECT_URI = f"{CHAINLIT_URL}/auth/oauth/google/callback"
42
+
43
+ app = FastAPI()
44
+ app.mount("/public", StaticFiles(directory="public"), name="public")
45
+ app.add_middleware(
46
+ CORSMiddleware,
47
+ allow_origins=["*"], # Update with appropriate origins
48
+ allow_methods=["*"],
49
+ allow_headers=["*"], # or specify the headers you want to allow
50
+ expose_headers=["X-User-Info"], # Expose the custom header
51
+ )
52
+
53
+ templates = Jinja2Templates(directory="templates")
54
+ session_store = {}
55
+ CHAINLIT_PATH = "/chainlit_tutor"
56
+
57
+ # only admin is given any additional permissions for now -- no limits on tokens
58
+ with open("public/files/students_encrypted.json", "r") as file:
59
+ USER_ROLES = json.load(file)
60
+
61
+ # Create a Google OAuth flow
62
+ flow = Flow.from_client_config(
63
+ {
64
+ "web": {
65
+ "client_id": GOOGLE_CLIENT_ID,
66
+ "client_secret": GOOGLE_CLIENT_SECRET,
67
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
68
+ "token_uri": "https://oauth2.googleapis.com/token",
69
+ "redirect_uris": [GOOGLE_REDIRECT_URI],
70
+ "scopes": [
71
+ "openid",
72
+ # "https://www.googleapis.com/auth/userinfo.email",
73
+ # "https://www.googleapis.com/auth/userinfo.profile",
74
+ ],
75
+ }
76
+ },
77
+ scopes=[
78
+ "openid",
79
+ "https://www.googleapis.com/auth/userinfo.email",
80
+ "https://www.googleapis.com/auth/userinfo.profile",
81
+ ],
82
+ redirect_uri=GOOGLE_REDIRECT_URI,
83
+ )
84
+
85
+
86
+ def get_user_role(username: str):
87
+
88
+ # Function to deterministically hash emails
89
+ def deterministic_hash(email, salt):
90
+ return hashlib.pbkdf2_hmac("sha256", email.encode(), salt, 100000).hex()
91
+
92
+ # encrypt email (#FIXME: this is not the best way to do this, not really encryption, more like a hash)
93
+ encryption_salt = EMAIL_ENCRYPTION_KEY.encode()
94
+ encrypted_email = deterministic_hash(username, encryption_salt)
95
+ role = USER_ROLES.get(encrypted_email, ["guest"])
96
+
97
+ if "guest" in role:
98
+ return "unauthorized"
99
+
100
+ return role
101
+
102
+
103
+ async def get_user_info_from_cookie(request: Request):
104
+ user_info_encoded = request.cookies.get("X-User-Info")
105
+ if user_info_encoded:
106
+ try:
107
+ user_info_json = base64.b64decode(user_info_encoded).decode()
108
+ return json.loads(user_info_json)
109
+ except Exception as e:
110
+ print(f"Error decoding user info: {e}")
111
+ return None
112
+ return None
113
+
114
+
115
+ async def del_user_info_from_cookie(request: Request, response: Response):
116
+ # Delete cookies from the response
117
+ response.delete_cookie("X-User-Info")
118
+ response.delete_cookie("session_token")
119
+ # Get the session token from the request cookies
120
+ session_token = request.cookies.get("session_token")
121
+ # Check if the session token exists in the session_store before deleting
122
+ if session_token and session_token in session_store:
123
+ del session_store[session_token]
124
+
125
+
126
+ def get_user_info(request: Request):
127
+ session_token = request.cookies.get("session_token")
128
+ if session_token and session_token in session_store:
129
+ return session_store[session_token]
130
+ return None
131
+
132
+
133
+ @app.get("/", response_class=HTMLResponse)
134
+ async def login_page(request: Request):
135
+ user_info = await get_user_info_from_cookie(request)
136
+ if user_info and user_info.get("google_signed_in"):
137
+ return RedirectResponse("/post-signin")
138
+ return templates.TemplateResponse(
139
+ "login.html",
140
+ {"request": request, "GITHUB_REPO": GITHUB_REPO, "DOCS_WEBSITE": DOCS_WEBSITE},
141
+ )
142
+
143
+
144
+ # @app.get("/login/guest")
145
+ # async def login_guest():
146
+ # username = "guest"
147
+ # session_token = secrets.token_hex(16)
148
+ # unique_session_id = secrets.token_hex(8)
149
+ # username = f"{username}_{unique_session_id}"
150
+ # session_store[session_token] = {
151
+ # "email": username,
152
+ # "name": "Guest",
153
+ # "profile_image": "",
154
+ # "google_signed_in": False, # Ensure guest users do not have this flag
155
+ # }
156
+ # user_info_json = json.dumps(session_store[session_token])
157
+ # user_info_encoded = base64.b64encode(user_info_json.encode()).decode()
158
+
159
+ # # Set cookies
160
+ # response = RedirectResponse(url="/post-signin", status_code=303)
161
+ # response.set_cookie(key="session_token", value=session_token)
162
+ # response.set_cookie(key="X-User-Info", value=user_info_encoded, httponly=True)
163
+ # return response
164
+
165
+
166
@app.get("/unauthorized", response_class=HTMLResponse)
async def unauthorized(request: Request):
    """Static page shown to users whose role resolves to guest/unknown."""
    context = {"request": request}
    return templates.TemplateResponse("unauthorized.html", context)
169
+
170
+
171
@app.get("/login/google")
async def login_google(request: Request):
    """Start the Google OAuth flow, or skip it for already-signed-in users.

    Builds a response whose headers clear any stale session cookies, then
    forwards those headers onto the redirect to Google's consent screen.
    """
    # Clear any existing session cookies to avoid conflicts with guest sessions
    response = RedirectResponse(url="/post-signin")
    response.delete_cookie(key="session_token")
    response.delete_cookie(key="X-User-Info")

    user_info = await get_user_info_from_cookie(request)
    # Check if user is already signed in using Google
    if user_info and user_info.get("google_signed_in"):
        # NOTE(review): this fresh RedirectResponse drops the cookie-clearing
        # headers built above — confirm that is intended for this branch.
        return RedirectResponse("/post-signin")
    else:
        # Redirect to Google's consent page, carrying the Set-Cookie
        # deletion headers from `response` along with it.
        authorization_url, _ = flow.authorization_url(prompt="consent")
        return RedirectResponse(authorization_url, headers=response.headers)
185
+
186
+
187
@app.get("/auth/oauth/google/callback")
async def auth_google(request: Request):
    """Google OAuth redirect URI.

    Exchanges the authorization code for tokens, verifies the ID token,
    authorizes the user by role, creates the server-side session, and
    mirrors it into the ``X-User-Info`` cookie. Any failure falls back to
    the login page.
    """
    try:
        # Exchange the authorization code for credentials.
        flow.fetch_token(code=request.query_params.get("code"))
        credentials = flow.credentials
        # Verify the signed ID token against our OAuth client id.
        user_info = id_token.verify_oauth2_token(
            credentials.id_token, google_requests.Request(), GOOGLE_CLIENT_ID
        )

        email = user_info["email"]
        name = user_info.get("name", "")
        profile_image = user_info.get("picture", "")
        role = get_user_role(email)

        # Unknown/guest users are not allowed in via Google sign-in.
        if role == "unauthorized":
            return RedirectResponse("/unauthorized")

        session_token = secrets.token_hex(16)
        session_store[session_token] = {
            "email": email,
            "name": name,
            "profile_image": profile_image,
            "google_signed_in": True,  # Set this flag to True for Google-signed users
        }

        # add literalai user info to session store to be sent to chainlit
        literalai_user = await get_user_details(email)
        session_store[session_token]["literalai_info"] = literalai_user.to_dict()
        session_store[session_token]["literalai_info"]["metadata"]["role"] = role

        user_info_json = json.dumps(session_store[session_token])
        user_info_encoded = base64.b64encode(user_info_json.encode()).decode()

        # Set cookies
        response = RedirectResponse(url="/post-signin", status_code=303)
        response.set_cookie(key="session_token", value=session_token)
        response.set_cookie(
            key="X-User-Info", value=user_info_encoded, httponly=True
        )  # TODO: is the flag httponly=True necessary?
        return response
    except Exception as e:
        # Token exchange / verification failed: log and restart sign-in.
        print(f"Error during Google OAuth callback: {e}")
        return RedirectResponse(url="/", status_code=302)
230
+
231
+
232
@app.get("/cooldown")
async def cooldown(request: Request):
    """Cooldown page for users who have exhausted their token allowance.

    If the (non-admin) user is still cooling down, render ``cooldown.html``
    with the end time; otherwise clear the cooldown flag, regenerate their
    tokens, and return them to the dashboard.
    """
    user_info = await get_user_info_from_cookie(request)
    if not user_info:
        # Fix: previously an unauthenticated request crashed on
        # user_info["email"]; send anonymous visitors to the login page.
        return RedirectResponse("/")
    user_details = await get_user_details(user_info["email"])
    current_datetime = get_time()
    cooldown, cooldown_end_time = await check_user_cooldown(
        user_details, current_datetime, COOLDOWN_TIME, TOKENS_LEFT, REGEN_TIME
    )
    print(f"User in cooldown: {cooldown}")
    print(f"Cooldown end time: {cooldown_end_time}")
    # Admins are never rate-limited.
    if cooldown and "admin" not in get_user_role(user_info["email"]):
        return templates.TemplateResponse(
            "cooldown.html",
            {
                "request": request,
                "username": user_info["email"],
                "role": get_user_role(user_info["email"]),
                "cooldown_end_time": cooldown_end_time,
                "tokens_left": user_details.metadata["tokens_left"],
            },
        )
    # Cooldown over (or admin): clear the flag, refill tokens, go back.
    user_details.metadata["in_cooldown"] = False
    await update_user_info(user_details)
    await reset_tokens_for_user(
        user_details,
        config["token_config"]["tokens_left"],
        config["token_config"]["regen_time"],
    )
    return RedirectResponse("/post-signin")
262
+
263
+
264
@app.get("/post-signin", response_class=HTMLResponse)
async def post_signin(request: Request):
    """Dashboard entry point after sign-in.

    Loads (or seeds, for new users) the token-accounting metadata, applies
    cooldown rules for non-admins, and renders the dashboard template.
    """
    user_info = await get_user_info_from_cookie(request)
    if not user_info:
        user_info = get_user_info(request)
    if not user_info:
        # Fix: previously user_info["email"] below crashed when neither the
        # cookie nor the server-side session identified the user.
        return RedirectResponse("/")

    user_details = await get_user_details(user_info["email"])
    current_datetime = get_time()
    user_details.metadata["last_login"] = current_datetime
    # First login: seed the token-accounting fields.
    if "tokens_left" not in user_details.metadata:
        user_details.metadata["tokens_left"] = (
            TOKENS_LEFT  # set the number of tokens left for the new user
        )
    if "last_message_time" not in user_details.metadata:
        user_details.metadata["last_message_time"] = current_datetime
    if "all_time_tokens_allocated" not in user_details.metadata:
        user_details.metadata["all_time_tokens_allocated"] = ALL_TIME_TOKENS_ALLOCATED
    if "in_cooldown" not in user_details.metadata:
        user_details.metadata["in_cooldown"] = False
    await update_user_info(user_details)

    # Non-admins may be inside a cooldown window; admins bypass it.
    if "last_message_time" in user_details.metadata and "admin" not in get_user_role(
        user_info["email"]
    ):
        cooldown, _ = await check_user_cooldown(
            user_details, current_datetime, COOLDOWN_TIME, TOKENS_LEFT, REGEN_TIME
        )
        if cooldown:
            user_details.metadata["in_cooldown"] = True
            return RedirectResponse("/cooldown")
        else:
            user_details.metadata["in_cooldown"] = False
            await reset_tokens_for_user(
                user_details,
                config["token_config"]["tokens_left"],
                config["token_config"]["regen_time"],
            )

    username = user_info["email"]
    role = get_user_role(username)
    jwt_token = request.cookies.get("X-User-Info")
    return templates.TemplateResponse(
        "dashboard.html",
        {
            "request": request,
            "username": username,
            "role": role,
            "jwt_token": jwt_token,
            "tokens_left": user_details.metadata["tokens_left"],
            "all_time_tokens_allocated": user_details.metadata[
                "all_time_tokens_allocated"
            ],
            "total_tokens_allocated": ALL_TIME_TOKENS_ALLOCATED,
        },
    )
321
+
322
+
323
@app.get("/start-tutor")
@app.post("/start-tutor")
async def start_tutor(request: Request):
    """Hand the signed-in user off to the mounted Chainlit app, re-issuing
    the ``X-User-Info`` cookie so Chainlit's header auth can read it."""
    user_info = await get_user_info_from_cookie(request)
    if not user_info:
        return RedirectResponse(url="/")
    payload = base64.b64encode(json.dumps(user_info).encode()).decode()
    response = RedirectResponse(CHAINLIT_PATH, status_code=303)
    response.set_cookie(key="X-User-Info", value=payload, httponly=True)
    return response
336
+
337
+
338
@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, exc: HTTPException):
    """Render a friendly error page for HTTP errors; 404 gets its own
    dedicated template."""
    if exc.status_code == 404:
        context = {"request": request}
        return templates.TemplateResponse("error_404.html", context, status_code=404)
    context = {"request": request, "error": str(exc)}
    return templates.TemplateResponse(
        "error.html", context, status_code=exc.status_code
    )
349
+
350
+
351
@app.exception_handler(Exception)
async def exception_handler(request: Request, exc: Exception):
    """Catch-all handler: any uncaught exception becomes a 500 error page."""
    context = {"request": request, "error": str(exc)}
    return templates.TemplateResponse("error.html", context, status_code=500)
356
+
357
+
358
@app.get("/logout", response_class=HTMLResponse)
async def logout(request: Request, response: Response):
    """Terminate the session: drop the server-side session entry, expire
    the auth cookies, and send the user back to the login page."""
    # Removes the session_store entry. NOTE(review): the delete_cookie
    # calls made on the injected `response` are discarded, because a new
    # RedirectResponse is returned below — confirm this is intended.
    await del_user_info_from_cookie(request=request, response=response)
    response = RedirectResponse(url="/", status_code=302)
    # Set cookies to empty values and expire them immediately
    response.set_cookie(key="session_token", value="", expires=0)
    response.set_cookie(key="X-User-Info", value="", expires=0)
    return response
366
+
367
+
368
@app.get("/get-tokens-left")
async def get_tokens_left(request: Request):
    """JSON endpoint: regenerate and report the user's remaining token
    balance; any failure is logged and reported as zero tokens."""
    try:
        user_info = await get_user_info_from_cookie(request)
        user_details = await get_user_details(user_info["email"])
        token_cfg = config["token_config"]
        await reset_tokens_for_user(
            user_details,
            token_cfg["tokens_left"],
            token_cfg["regen_time"],
        )
        return {"tokens_left": user_details.metadata["tokens_left"]}
    except Exception as e:
        print(f"Error getting tokens left: {e}")
        return {"tokens_left": 0}
383
+
384
+
385
# Mount the Chainlit chat UI as a sub-application under CHAINLIT_PATH.
mount_chainlit(app=app, target="chainlit_app.py", path=CHAINLIT_PATH)

if __name__ == "__main__":
    import uvicorn

    # Run a local ASGI server when executed directly.
    uvicorn.run(app, host="127.0.0.1", port=7860)
apps/ai_tutor/chainlit_app.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import chainlit.data as cl_data
2
+ import asyncio
3
+ from config.constants import (
4
+ LITERAL_API_KEY_LOGGING,
5
+ LITERAL_API_URL,
6
+ )
7
+ from edubotics_core.chat_processor.literal_ai import CustomLiteralDataLayer
8
+ import json
9
+ from typing import Any, Dict, no_type_check
10
+ import chainlit as cl
11
+ from edubotics_core.chat.llm_tutor import LLMTutor
12
+ from edubotics_core.chat.helpers import (
13
+ get_sources,
14
+ get_history_chat_resume,
15
+ get_history_setup_llm,
16
+ # get_last_config,
17
+ )
18
+ from edubotics_core.chat_processor.helpers import (
19
+ update_user_info,
20
+ get_user_details,
21
+ )
22
+ from helpers import (
23
+ check_user_cooldown,
24
+ reset_tokens_for_user,
25
+ )
26
+ from helpers import get_time
27
+ import copy
28
+ from typing import Optional
29
+ from chainlit.types import ThreadDict
30
+ import base64
31
+ from langchain_community.callbacks import get_openai_callback
32
+ from datetime import datetime, timezone
33
+ from config.config_manager import config_manager
34
+
35
# Client interaction timeout (presumably milliseconds — TODO confirm; not
# referenced elsewhere in this file).
USER_TIMEOUT = 60_000
# Display names used for message authors in the chat transcript.
SYSTEM = "System"
LLM = "AI Tutor"
AGENT = "Agent"
YOU = "User"
ERROR = "Error"

# set config
config = config_manager.get_config().dict()
44
+
45
+
46
async def setup_data_layer():
    """
    Set up the data layer for chat logging.

    Returns a CustomLiteralDataLayer when chat logging is enabled in the
    config, otherwise None (logging disabled).
    """
    if not config["chat_logging"]["log_chat"]:
        return None
    return CustomLiteralDataLayer(
        api_key=LITERAL_API_KEY_LOGGING, server=LITERAL_API_URL
    )
58
+
59
+
60
async def update_user_from_chainlit(user, token_count=0):
    """Charge ``token_count`` tokens to a non-admin user and persist the
    updated accounting metadata.

    Returns the user's remaining balance, floored at zero for display.
    Admin users are never charged.
    """
    if "admin" not in user.metadata["role"]:
        user.metadata["tokens_left"] = user.metadata["tokens_left"] - token_count
        # NOTE(review): all_time_tokens_allocated is *decremented* here, so it
        # behaves as a lifetime budget, not a usage counter — confirm.
        user.metadata["all_time_tokens_allocated"] = (
            user.metadata["all_time_tokens_allocated"] - token_count
        )
        user.metadata["tokens_left_at_last_message"] = user.metadata[
            "tokens_left"
        ]  # tokens_left will keep regenerating outside of chainlit
        user.metadata["last_message_time"] = get_time()
        await update_user_info(user)

    tokens_left = user.metadata["tokens_left"]
    if tokens_left < 0:
        tokens_left = 0
    return tokens_left
76
+
77
+
78
class Chatbot:
    def __init__(self, config):
        """
        Initialize the Chatbot class.

        Args:
            config: Full application config dict (llm_params, vectorstore,
                token_config, chat_logging, splitter_options, ...).
        """
        self.config = config
84
+
85
    @no_type_check
    async def setup_llm(self):
        """
        Set up the LLM with the provided settings. Update the configuration and initialize the LLM tutor.

        Reads the user's widget selections from the session, carries the
        existing conversation memory over, and rebuilds the QA chain.

        #TODO: Clean this up.
        """

        # Widget selections saved by update_llm(); empty before first save.
        llm_settings = cl.user_session.get("llm_settings", {})
        (
            chat_profile,
            retriever_method,
            memory_window,
            llm_style,
            generate_follow_up,
            chunking_mode,
        ) = (
            llm_settings.get("chat_model"),
            llm_settings.get("retriever_method"),
            llm_settings.get("memory_window"),
            llm_settings.get("llm_style"),
            llm_settings.get("follow_up_questions"),
            llm_settings.get("chunking_mode"),
        )

        chain = cl.user_session.get("chain")
        # Fall back to the first store entry's messages when no explicit
        # memory was stashed in the session.
        memory_list = cl.user_session.get(
            "memory",
            (
                list(chain.store.values())[0].messages
                if len(chain.store.values()) > 0
                else []
            ),
        )
        conversation_list = get_history_setup_llm(memory_list)

        # Snapshot the old config so update_llm can diff against it.
        old_config = copy.deepcopy(self.config)
        self.config["vectorstore"]["db_option"] = retriever_method
        self.config["llm_params"]["memory_window"] = memory_window
        self.config["llm_params"]["llm_style"] = llm_style
        self.config["llm_params"]["llm_loader"] = chat_profile
        self.config["llm_params"]["generate_follow_up"] = generate_follow_up
        self.config["splitter_options"]["chunking_mode"] = chunking_mode

        self.llm_tutor.update_llm(
            old_config, self.config
        )  # update only llm attributes that are changed
        self.chain = self.llm_tutor.qa_bot(
            memory=conversation_list,
        )

        cl.user_session.set("chain", self.chain)
        cl.user_session.set("llm_tutor", self.llm_tutor)
138
+
139
    @no_type_check
    async def update_llm(self, new_settings: Dict[str, Any]):
        """
        Update the LLM settings and reinitialize the LLM with the new settings.

        Args:
            new_settings (Dict[str, Any]): The new settings to update.
        """
        cl.user_session.set("llm_settings", new_settings)
        # Tell the user first, then rebuild the chain from the new settings.
        await self.inform_llm_settings()
        await self.setup_llm()
150
+
151
    async def make_llm_settings_widgets(self, config=None):
        """
        Create and send the widgets for LLM settings configuration.

        Args:
            config: The configuration to use for setting up the widgets.
                Defaults to this chatbot's own config.
        """
        config = config or self.config
        # Initial selections mirror the current config so the panel reflects
        # the active state.
        await cl.ChatSettings(
            [
                cl.input_widget.Select(
                    id="chat_model",
                    label="Model Name (Default GPT-3)",
                    values=["local_llm", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4o-mini"],
                    initial_index=[
                        "local_llm",
                        "gpt-3.5-turbo-1106",
                        "gpt-4",
                        "gpt-4o-mini",
                    ].index(config["llm_params"]["llm_loader"]),
                ),
                cl.input_widget.Select(
                    id="retriever_method",
                    label="Retriever (Default FAISS)",
                    values=["FAISS", "Chroma", "RAGatouille", "RAPTOR"],
                    initial_index=["FAISS", "Chroma", "RAGatouille", "RAPTOR"].index(
                        config["vectorstore"]["db_option"]
                    ),
                ),
                cl.input_widget.Slider(
                    id="memory_window",
                    label="Memory Window (Default 3)",
                    initial=3,
                    min=0,
                    max=10,
                    step=1,
                ),
                cl.input_widget.Switch(
                    id="view_sources", label="View Sources", initial=False
                ),
                cl.input_widget.Switch(
                    id="stream_response",
                    label="Stream response",
                    initial=config["llm_params"]["stream"],
                ),
                cl.input_widget.Select(
                    id="chunking_mode",
                    label="Chunking mode",
                    values=["fixed", "semantic"],
                    initial_index=1,
                ),
                cl.input_widget.Switch(
                    id="follow_up_questions",
                    label="Generate follow up questions",
                    initial=False,
                ),
                cl.input_widget.Select(
                    id="llm_style",
                    label="Type of Conversation (Default Normal)",
                    values=["Normal", "ELI5"],
                    initial_index=0,
                ),
            ]
        ).send()
215
+
216
    @no_type_check
    async def inform_llm_settings(self):
        """
        Inform the user about the updated LLM settings and display them as a message.
        """
        await cl.Message(
            author=SYSTEM,
            content="LLM settings have been updated. You can continue with your Query!",
        ).send()
225
+
226
    async def set_starters(self):
        """
        Set starter messages for the chatbot.

        Returns None for a resumed thread (one that already has steps), or
        the list of course-related starter prompts for a fresh chat.
        """
        # Return Starters only if the chat is new

        try:
            # NOTE(review): get_thread is not awaited here; if it is a
            # coroutine this raises, is caught below, and starters are
            # always returned — confirm intended.
            thread = cl_data._data_layer.get_thread(
                cl.context.session.thread_id
            )  # see if the thread has any steps
            if thread.steps or len(thread.steps) > 0:
                return None
        except Exception as e:
            print(e)
            return [
                cl.Starter(
                    label="recording on Transformers?",
                    message="Where can I find the recording for the lecture on Transformers?",
                    icon="/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg",
                ),
                cl.Starter(
                    label="where's the schedule?",
                    message="When are the lectures? I can't find the schedule.",
                    icon="/public/assets/images/starter_icons/alarmy-svgrepo-com.svg",
                ),
                cl.Starter(
                    label="Due Date?",
                    message="When is the final project due?",
                    icon="/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg",
                ),
                cl.Starter(
                    label="Explain backprop.",
                    message="I didn't understand the math behind backprop, could you explain it?",
                    icon="/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg",
                ),
            ]
262
+
263
+ def rename(self, orig_author: str):
264
+ """
265
+ Rename the original author to a more user-friendly name.
266
+
267
+ Args:
268
+ orig_author (str): The original author's name.
269
+
270
+ Returns:
271
+ str: The renamed author.
272
+ """
273
+ rename_dict = {"Chatbot": LLM}
274
+ return rename_dict.get(orig_author, orig_author)
275
+
276
    async def start(self):
        """
        Start the chatbot, initialize settings widgets,
        and display and load previous conversation if chat logging is enabled.
        """

        await self.make_llm_settings_widgets(self.config)  # Reload the settings widgets

        user = cl.user_session.get("user")

        # TODO: remove self.user with cl.user_session.get("user")
        try:
            self.user = {
                "user_id": user.identifier,
                "session_id": cl.context.session.thread_id,
            }
        except Exception as e:
            # No authenticated user in the session: fall back to a guest id.
            print(e)
            self.user = {
                "user_id": "guest",
                "session_id": cl.context.session.thread_id,
            }

        # "memory" is populated by on_chat_resume for resumed threads.
        memory = cl.user_session.get("memory", [])
        self.llm_tutor = LLMTutor(self.config, user=self.user)

        self.chain = self.llm_tutor.qa_bot(
            memory=memory,
        )
        self.question_generator = self.llm_tutor.question_generator
        cl.user_session.set("llm_tutor", self.llm_tutor)
        cl.user_session.set("chain", self.chain)
308
+
309
+ async def stream_response(self, response):
310
+ """
311
+ Stream the response from the LLM.
312
+
313
+ Args:
314
+ response: The response from the LLM.
315
+ """
316
+ msg = cl.Message(content="")
317
+ await msg.send()
318
+
319
+ output = {}
320
+ for chunk in response:
321
+ if "answer" in chunk:
322
+ await msg.stream_token(chunk["answer"])
323
+
324
+ for key in chunk:
325
+ if key not in output:
326
+ output[key] = chunk[key]
327
+ else:
328
+ output[key] += chunk[key]
329
+ return output
330
+
331
    async def main(self, message):
        """
        Process and Display the Conversation.

        Enforces token-budget/cooldown rules for non-admins, runs the QA
        chain (optionally streaming), attaches sources and follow-up
        actions, charges the tokens used, and sends the answer.

        Args:
            message: The incoming chat message.
        """

        chain = cl.user_session.get("chain")
        token_count = 0  # initialize token count
        if not chain:
            await self.start()  # start the chatbot if the chain is not present
            chain = cl.user_session.get("chain")

        # update user info with last message time
        user = cl.user_session.get("user")
        await reset_tokens_for_user(
            user,
            self.config["token_config"]["tokens_left"],
            self.config["token_config"]["regen_time"],
        )
        # Re-read the persisted metadata so the session copy is fresh.
        updated_user = await get_user_details(user.identifier)
        user.metadata = updated_user.metadata
        cl.user_session.set("user", user)

        # see if user has token credits left
        # if not, return message saying they have run out of tokens
        if user.metadata["tokens_left"] <= 0 and "admin" not in user.metadata["role"]:
            current_datetime = get_time()
            cooldown, cooldown_end_time = await check_user_cooldown(
                user,
                current_datetime,
                self.config["token_config"]["cooldown_time"],
                self.config["token_config"]["tokens_left"],
                self.config["token_config"]["regen_time"],
            )
            if cooldown:
                # get time left in cooldown
                # convert both to datetime objects
                cooldown_end_time = datetime.fromisoformat(cooldown_end_time).replace(
                    tzinfo=timezone.utc
                )
                current_datetime = datetime.fromisoformat(current_datetime).replace(
                    tzinfo=timezone.utc
                )
                cooldown_time_left = cooldown_end_time - current_datetime
                # Get the total seconds
                total_seconds = int(cooldown_time_left.total_seconds())
                # Calculate hours, minutes, and seconds
                hours, remainder = divmod(total_seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                # Format the time as 00 hrs 00 mins 00 secs
                formatted_time = f"{hours:02} hrs {minutes:02} mins {seconds:02} secs"
                await cl.Message(
                    content=(
                        "Ah, seems like you have run out of tokens...Click "
                        '<a href="/cooldown" style="color: #0000CD; text-decoration: none;" target="_self">here</a> for more info. Please come back after {}'.format(
                            formatted_time
                        )
                    ),
                    author=SYSTEM,
                ).send()
                user.metadata["in_cooldown"] = True
                await update_user_info(user)
                return
            else:
                await cl.Message(
                    content=(
                        "Ah, seems like you don't have any tokens left...Please wait while we regenerate your tokens. Click "
                        '<a href="/cooldown" style="color: #0000CD; text-decoration: none;" target="_self">here</a> to view your token credits.'
                    ),
                    author=SYSTEM,
                ).send()
                return

        user.metadata["in_cooldown"] = False

        llm_settings = cl.user_session.get("llm_settings", {})
        view_sources = llm_settings.get("view_sources", False)
        stream = llm_settings.get("stream_response", False)
        stream = False  # Fix streaming
        user_query_dict = {"input": message.content}
        # Define the base configuration
        cb = cl.AsyncLangchainCallbackHandler()
        chain_config = {
            "configurable": {
                "user_id": self.user["user_id"],
                "conversation_id": self.user["session_id"],
                "memory_window": self.config["llm_params"]["memory_window"],
            },
            # Only attach callbacks when logging is enabled and configured.
            "callbacks": (
                [cb]
                if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
                else None
            ),
        }

        # Track token usage across the chain invocation.
        with get_openai_callback() as token_count_cb:
            if stream:
                res = chain.stream(user_query=user_query_dict, config=chain_config)
                res = await self.stream_response(res)
            else:
                res = await chain.invoke(
                    user_query=user_query_dict,
                    config=chain_config,
                )
            token_count += token_count_cb.total_tokens

        answer = res.get("answer", res.get("result"))

        answer_with_sources, source_elements, sources_dict = get_sources(
            res, answer, stream=stream, view_sources=view_sources
        )
        answer_with_sources = answer_with_sources.replace("$$", "$")

        actions = []

        if self.config["llm_params"]["generate_follow_up"]:
            cb_follow_up = cl.AsyncLangchainCallbackHandler()
            config = {
                "callbacks": (
                    [cb_follow_up]
                    if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
                    else None
                )
            }
            with get_openai_callback() as token_count_cb:
                list_of_questions = await self.question_generator.generate_questions(
                    query=user_query_dict["input"],
                    response=answer,
                    chat_history=res.get("chat_history"),
                    context=res.get("context"),
                    config=config,
                )

            token_count += token_count_cb.total_tokens

            # One clickable action per generated follow-up question.
            for question in list_of_questions:
                actions.append(
                    cl.Action(
                        name="follow up question",
                        value="example_value",
                        description=question,
                        label=question,
                    )
                )

        # # update user info with token count
        tokens_left = await update_user_from_chainlit(user, token_count)

        # Append the remaining-token footer to the rendered answer.
        answer_with_sources += (
            '\n\n<footer><span style="font-size: 0.8em; text-align: right; display: block;">Tokens Left: '
            + str(tokens_left)
            + "</span></footer>\n"
        )

        await cl.Message(
            content=answer_with_sources,
            elements=source_elements,
            author=LLM,
            actions=actions,
        ).send()
493
+
494
    async def on_chat_resume(self, thread: ThreadDict):
        """Rebuild the chatbot for a resumed thread, seeding the session
        memory from the thread's stored steps."""
        # thread_config = None
        steps = thread["steps"]
        k = self.config["llm_params"][
            "memory_window"
        ]  # on resume, always use the default memory window
        conversation_list = get_history_chat_resume(steps, k, SYSTEM, LLM)
        # thread_config = get_last_config(
        #     steps
        # )  # TODO: Returns None for now - which causes config to be reloaded with default values
        cl.user_session.set("memory", conversation_list)
        await self.start()
506
+
507
+ @cl.header_auth_callback
508
+ def header_auth_callback(headers: dict) -> Optional[cl.User]:
509
+ # try: # TODO: Add try-except block after testing
510
+ # TODO: Implement to get the user information from the headers (not the cookie)
511
+ cookie = headers.get("cookie") # gets back a str
512
+ # Create a dictionary from the pairs
513
+ cookie_dict = {}
514
+ for pair in cookie.split("; "):
515
+ key, value = pair.split("=", 1)
516
+ # Strip surrounding quotes if present
517
+ cookie_dict[key] = value.strip('"')
518
+
519
+ decoded_user_info = base64.b64decode(
520
+ cookie_dict.get("X-User-Info", "")
521
+ ).decode()
522
+ decoded_user_info = json.loads(decoded_user_info)
523
+
524
+ return cl.User(
525
+ id=decoded_user_info["literalai_info"]["id"],
526
+ identifier=decoded_user_info["literalai_info"]["identifier"],
527
+ metadata=decoded_user_info["literalai_info"]["metadata"],
528
+ )
529
+
530
    async def on_follow_up(self, action: cl.Action):
        """Handle a click on a follow-up-question action: echo the question
        as a user message, then run it through the normal pipeline."""
        user = cl.user_session.get("user")
        message = await cl.Message(
            content=action.description,
            type="user_message",
            author=user.identifier,
        ).send()
        # Wrap the processing in a step parented to the echoed message so
        # the transcript groups them together.
        async with cl.Step(
            name="on_follow_up", type="run", parent_id=message.id
        ) as step:
            await self.main(message)
            step.output = message.content
542
+
543
+
544
# Single module-level chatbot instance shared by all Chainlit callbacks.
chatbot = Chatbot(config=config)


async def start_app():
    """Wire the (optional) logging data layer and register every Chainlit
    lifecycle callback against the shared chatbot instance."""
    cl_data._data_layer = await setup_data_layer()
    chatbot.literal_client = cl_data._data_layer.client if cl_data._data_layer else None
    cl.set_starters(chatbot.set_starters)
    cl.author_rename(chatbot.rename)
    cl.on_chat_start(chatbot.start)
    cl.on_chat_resume(chatbot.on_chat_resume)
    cl.on_message(chatbot.main)
    cl.on_settings_update(chatbot.update_llm)
    cl.action_callback("follow up question")(chatbot.on_follow_up)


# Schedule start_app() on the already-running loop (when mounted inside
# FastAPI via mount_chainlit) or run it to completion (standalone).
loop = asyncio.get_event_loop()
if loop.is_running():
    asyncio.ensure_future(start_app())
else:
    asyncio.run(start_app())
apps/ai_tutor/config/__pycache__/config_manager.cpython-311.pyc ADDED
Binary file (11.7 kB). View file
 
apps/ai_tutor/config/__pycache__/constants.cpython-311.pyc ADDED
Binary file (1.33 kB). View file
 
apps/ai_tutor/config/__pycache__/prompts.cpython-311.pyc ADDED
Binary file (6.98 kB). View file
 
apps/ai_tutor/config/config.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Application configuration for the AI tutor (validated by config_manager.py).
log_dir: 'storage/logs' # str
log_chunk_dir: 'storage/logs/chunks' # str
device: 'cpu' # str [cuda, cpu]

vectorstore:
  load_from_HF: True # bool
  reparse_files: True # bool
  data_path: 'storage/data' # str
  url_file_path: 'storage/data/urls.txt' # str
  expand_urls: True # bool
  db_option : 'RAGatouille' # str [FAISS, Chroma, RAGatouille, RAPTOR]
  db_path : 'vectorstores' # str
  model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
  search_top_k : 3 # int
  score_threshold : 0.2 # float

  faiss_params: # Not used as of now
    index_path: 'vectorstores/faiss.index' # str
    index_type: 'Flat' # str [Flat, HNSW, IVF]
    index_dimension: 384 # int
    index_nlist: 100 # int
    index_nprobe: 10 # int

  colbert_params:
    index_name: "new_idx" # str

llm_params:
  llm_arch: 'langchain' # [langchain]
  use_history: True # bool
  generate_follow_up: False # bool
  memory_window: 3 # int
  llm_style: 'Normal' # str [Normal, ELI5]
  llm_loader: 'gpt-4o-mini' # str [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
  openai_params:
    temperature: 0.7 # float
  local_llm_params:
    temperature: 0.7 # float
    repo_id: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF' # HuggingFace repo id
    filename: 'tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Specific name of gguf file in the repo
    model_path: 'storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Path to the model file
  stream: False # bool
  pdf_reader: 'gpt' # str [llama, pymupdf, gpt]

chat_logging:
  log_chat: True # bool
  platform: 'literalai'
  callbacks: True # bool

splitter_options:
  use_splitter: True # bool
  split_by_token : True # bool
  remove_leftover_delimiters: True # bool
  remove_chunks: False # bool
  chunking_mode: 'semantic' # str [fixed, semantic]
  chunk_size : 300 # int
  chunk_overlap : 30 # int
  chunk_separators : ["\n\n", "\n", " ", ""] # list of strings
  front_chunks_to_remove : null # int or None
  last_chunks_to_remove : null # int or None
  # NOTE(review): in single-quoted YAML, '\t' and '\n' are the literal
  # two-character sequences backslash-t / backslash-n (unlike the
  # double-quoted chunk_separators above), and ' ' appears twice —
  # confirm intended.
  delimiters_to_remove : ['\t', '\n', ' ', ' '] # list of strings
apps/ai_tutor/config/config_manager.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, conint, confloat, HttpUrl
2
+ from typing import Optional, List
3
+ import yaml
4
+
5
+
6
+ class FaissParams(BaseModel):
7
+ index_path: str = "vectorstores/faiss.index"
8
+ index_type: str = "Flat" # Options: [Flat, HNSW, IVF]
9
+ index_dimension: conint(gt=0) = 384
10
+ index_nlist: conint(gt=0) = 100
11
+ index_nprobe: conint(gt=0) = 10
12
+
13
+
14
+ class ColbertParams(BaseModel):
15
+ index_name: str = "new_idx"
16
+
17
+
18
+ class VectorStoreConfig(BaseModel):
19
+ load_from_HF: bool = True
20
+ reparse_files: bool = True
21
+ data_path: str = "storage/data"
22
+ url_file_path: str = "storage/data/urls.txt"
23
+ expand_urls: bool = True
24
+ db_option: str = "RAGatouille" # Options: [FAISS, Chroma, RAGatouille, RAPTOR]
25
+ db_path: str = "vectorstores"
26
+ model: str = (
27
+ # Options: [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002]
28
+ "sentence-transformers/all-MiniLM-L6-v2"
29
+ )
30
+ search_top_k: conint(gt=0) = 3
31
+ score_threshold: confloat(ge=0.0, le=1.0) = 0.2
32
+
33
+ faiss_params: Optional[FaissParams] = None
34
+ colbert_params: Optional[ColbertParams] = None
35
+
36
+
37
+ class OpenAIParams(BaseModel):
38
+ temperature: confloat(ge=0.0, le=1.0) = 0.7
39
+
40
+
41
+ class LocalLLMParams(BaseModel):
42
+ temperature: confloat(ge=0.0, le=1.0) = 0.7
43
+ repo_id: str = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF" # HuggingFace repo id
44
+ filename: str = (
45
+ "tinyllama-1.1b-chat-v1.0.Q5_0.gguf" # Specific name of gguf file in the repo
46
+ )
47
+ model_path: str = (
48
+ "storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf" # Path to the model file
49
+ )
50
+
51
+
52
+ class LLMParams(BaseModel):
53
+ llm_arch: str = "langchain" # Options: [langchain]
54
+ use_history: bool = True
55
+ generate_follow_up: bool = False
56
+ memory_window: conint(ge=1) = 3
57
+ llm_style: str = "Normal" # Options: [Normal, ELI5]
58
+ llm_loader: str = (
59
+ "gpt-4o-mini" # Options: [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
60
+ )
61
+ openai_params: Optional[OpenAIParams] = None
62
+ local_llm_params: Optional[LocalLLMParams] = None
63
+ stream: bool = False
64
+ pdf_reader: str = "gpt" # Options: [llama, pymupdf, gpt]
65
+
66
+
67
+ class ChatLoggingConfig(BaseModel):
68
+ log_chat: bool = True
69
+ platform: str = "literalai"
70
+ callbacks: bool = True
71
+
72
+
73
+ class SplitterOptions(BaseModel):
74
+ use_splitter: bool = True
75
+ split_by_token: bool = True
76
+ remove_leftover_delimiters: bool = True
77
+ remove_chunks: bool = False
78
+ chunking_mode: str = "semantic" # Options: [fixed, semantic]
79
+ chunk_size: conint(gt=0) = 300
80
+ chunk_overlap: conint(ge=0) = 30
81
+ chunk_separators: List[str] = ["\n\n", "\n", " ", ""]
82
+ front_chunks_to_remove: Optional[conint(ge=0)] = None
83
+ last_chunks_to_remove: Optional[conint(ge=0)] = None
84
+ delimiters_to_remove: List[str] = ["\t", "\n", " ", " "]
85
+
86
+
87
+ class RetrieverConfig(BaseModel):
88
+ retriever_hf_paths: dict[str, str] = {"RAGatouille": "XThomasBU/Colbert_Index"}
89
+
90
+
91
+ class MetadataConfig(BaseModel):
92
+ metadata_links: List[HttpUrl] = [
93
+ "https://dl4ds.github.io/sp2024/lectures/",
94
+ "https://dl4ds.github.io/sp2024/schedule/",
95
+ ]
96
+ slide_base_link: HttpUrl = "https://dl4ds.github.io"
97
+
98
+
99
+ class TokenConfig(BaseModel):
100
+ cooldown_time: conint(gt=0) = 60
101
+ regen_time: conint(gt=0) = 180
102
+ tokens_left: conint(gt=0) = 2000
103
+ all_time_tokens_allocated: conint(gt=0) = 1000000
104
+
105
+
106
+ class MiscConfig(BaseModel):
107
+ github_repo: HttpUrl = "https://github.com/edubotics-ai/edubot-core"
108
+ docs_website: HttpUrl = "https://dl4ds.github.io/dl4ds_tutor/"
109
+
110
+
111
+ class APIConfig(BaseModel):
112
+ timeout: conint(gt=0) = 60
113
+
114
+
115
+ class Config(BaseModel):
116
+ log_dir: str = "storage/logs"
117
+ log_chunk_dir: str = "storage/logs/chunks"
118
+ device: str = "cpu" # Options: ['cuda', 'cpu']
119
+
120
+ vectorstore: VectorStoreConfig
121
+ llm_params: LLMParams
122
+ chat_logging: ChatLoggingConfig
123
+ splitter_options: SplitterOptions
124
+ retriever: RetrieverConfig
125
+ metadata: MetadataConfig
126
+ token_config: TokenConfig
127
+ misc: MiscConfig
128
+ api_config: APIConfig
129
+
130
+
131
+ class ConfigManager:
132
+ def __init__(self, config_path: str, project_config_path: str):
133
+ self.config_path = config_path
134
+ self.project_config_path = project_config_path
135
+ self.config = self.load_config()
136
+ self.validate_config()
137
+
138
+ def load_config(self) -> Config:
139
+ with open(self.config_path, "r") as f:
140
+ config_data = yaml.safe_load(f)
141
+
142
+ with open(self.project_config_path, "r") as f:
143
+ project_config_data = yaml.safe_load(f)
144
+
145
+ # Merge the two configurations
146
+ merged_config = {**config_data, **project_config_data}
147
+
148
+ return Config(**merged_config)
149
+
150
+ def get_config(self) -> Config:
151
+ return ConfigWrapper(self.config)
152
+
153
+ def validate_config(self):
154
+ # If any required fields are missing, raise an error
155
+ # required_fields = [
156
+ # "vectorstore", "llm_params", "chat_logging", "splitter_options",
157
+ # "retriever", "metadata", "token_config", "misc", "api_config"
158
+ # ]
159
+ # for field in required_fields:
160
+ # if not hasattr(self.config, field):
161
+ # raise ValueError(f"Missing required configuration field: {field}")
162
+
163
+ # # Validate types of specific fields
164
+ # if not isinstance(self.config.vectorstore, VectorStoreConfig):
165
+ # raise TypeError("vectorstore must be an instance of VectorStoreConfig")
166
+ # if not isinstance(self.config.llm_params, LLMParams):
167
+ # raise TypeError("llm_params must be an instance of LLMParams")
168
+ pass
169
+
170
+
171
+ class ConfigWrapper:
172
+ def __init__(self, config: Config):
173
+ self._config = config
174
+
175
+ def __getitem__(self, key):
176
+ return getattr(self._config, key)
177
+
178
+ def __getattr__(self, name):
179
+ return getattr(self._config, name)
180
+
181
+ def dict(self):
182
+ return self._config.dict()
183
+
184
+
185
+ # Usage
186
+ config_manager = ConfigManager(
187
+ config_path="config/config.yml", project_config_path="config/project_config.yml"
188
+ )
189
+ # config = config_manager.get_config().dict()
apps/ai_tutor/config/constants.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv
2
+ import os
3
+
4
+ load_dotenv()
5
+
6
+ # API Keys - Loaded from the .env file
7
+
8
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
9
+ LLAMA_CLOUD_API_KEY = os.getenv("LLAMA_CLOUD_API_KEY")
10
+ HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
11
+ LITERAL_API_KEY_LOGGING = os.getenv("LITERAL_API_KEY_LOGGING")
12
+ LITERAL_API_URL = os.getenv("LITERAL_API_URL")
13
+ CHAINLIT_URL = os.getenv("CHAINLIT_URL")
14
+ EMAIL_ENCRYPTION_KEY = os.getenv("EMAIL_ENCRYPTION_KEY")
15
+
16
+ OAUTH_GOOGLE_CLIENT_ID = os.getenv("OAUTH_GOOGLE_CLIENT_ID")
17
+ OAUTH_GOOGLE_CLIENT_SECRET = os.getenv("OAUTH_GOOGLE_CLIENT_SECRET")
18
+
19
+ opening_message = "Hey, What Can I Help You With?\n\nYou can me ask me questions about the course logistics, course content, about the final project, or anything else!"
20
+ chat_end_message = (
21
+ "I hope I was able to help you. If you have any more questions, feel free to ask!"
22
+ )
23
+
24
+ # Model Paths
25
+
26
+ LLAMA_PATH = "../storage/models/tinyllama"
apps/ai_tutor/config/project_config.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ retriever:
2
+ retriever_hf_paths:
3
+ RAGatouille: "XThomasBU/Colbert_Index"
4
+
5
+ metadata:
6
+ metadata_links: ["https://dl4ds.github.io/sp2024/lectures/", "https://dl4ds.github.io/sp2024/schedule/"]
7
+ slide_base_link: "https://dl4ds.github.io"
8
+
9
+ token_config:
10
+ cooldown_time: 60
11
+ regen_time: 180
12
+ tokens_left: 2000
13
+ all_time_tokens_allocated: 1000000
14
+
15
+ misc:
16
+ github_repo: "https://github.com/edubotics-ai/edubot-core"
17
+ docs_website: "https://dl4ds.github.io/dl4ds_tutor/"
18
+
19
+ api_config:
20
+ timeout: 60
apps/ai_tutor/config/prompts.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ prompts = {
2
+ "openai": {
3
+ "rephrase_prompt": (
4
+ "You are someone that rephrases statements. Rephrase the student's question to add context from their chat history if relevant, ensuring it remains from the student's point of view. "
5
+ "Incorporate relevant details from the chat history to make the question clearer and more specific. "
6
+ "Do not change the meaning of the original statement, and maintain the student's tone and perspective. "
7
+ "If the question is conversational and doesn't require context, do not rephrase it. "
8
+ "Example: If the student previously asked about backpropagation in the context of deep learning and now asks 'what is it', rephrase to 'What is backpropagation.'. "
9
+ "Example: Do not rephrase if the user is asking something specific like 'cool, suggest a project with transformers to use as my final project' "
10
+ "Chat history: \n{chat_history}\n"
11
+ "Rephrase the following question only if necessary: '{input}'"
12
+ "Rephrased Question:'"
13
+ ),
14
+ "prompt_with_history": {
15
+ "normal": (
16
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
17
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
18
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
19
+ "Render math equations in LaTeX format between $ or $$ signs, stick to the parameter and variable icons found in your context. Be sure to explain the parameters and variables in the equations."
20
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
21
+ "Do not get influenced by the style of conversation in the chat history. Follow the instructions given here."
22
+ "Chat History:\n{chat_history}\n\n"
23
+ "Context:\n{context}\n\n"
24
+ "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
25
+ "Student: {input}\n"
26
+ "AI Tutor:"
27
+ ),
28
+ "eli5": (
29
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Your job is to explain things in the simplest and most engaging way possible, just like the 'Explain Like I'm 5' (ELI5) concept."
30
+ "If you don't know the answer, do your best without making things up. Keep your explanations straightforward and very easy to understand."
31
+ "Use the chat history and context to help you, but avoid repeating past responses. Provide links from the source_file metadata when they're helpful."
32
+ "Use very simple language and examples to explain any math equations, and put the equations in LaTeX format between $ or $$ signs."
33
+ "Be friendly and engaging, like you're chatting with a young child who's curious and eager to learn. Avoid complex terms and jargon."
34
+ "Include simple and clear examples wherever you can to make things easier to understand."
35
+ "Do not get influenced by the style of conversation in the chat history. Follow the instructions given here."
36
+ "Chat History:\n{chat_history}\n\n"
37
+ "Context:\n{context}\n\n"
38
+ "Answer the student's question below in a friendly, simple, and engaging way, just like the ELI5 concept. Use the context and history only if they're relevant, otherwise, just have a natural conversation."
39
+ "Give a clear and detailed explanation with simple examples to make it easier to understand. Remember, your goal is to break down complex topics into very simple terms, just like ELI5."
40
+ "Student: {input}\n"
41
+ "AI Tutor:"
42
+ ),
43
+ "socratic": (
44
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Engage the student in a Socratic dialogue to help them discover answers on their own. Use the provided context to guide your questioning."
45
+ "If you don't know the answer, do your best without making things up. Keep the conversation engaging and inquisitive."
46
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata when relevant. Use the source context that is most relevant."
47
+ "Speak in a friendly and engaging manner, encouraging critical thinking and self-discovery."
48
+ "Use questions to lead the student to explore the topic and uncover answers."
49
+ "Chat History:\n{chat_history}\n\n"
50
+ "Context:\n{context}\n\n"
51
+ "Answer the student's question below by guiding them through a series of questions and insights that lead to deeper understanding. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation."
52
+ "Foster an inquisitive mindset and help the student discover answers through dialogue."
53
+ "Student: {input}\n"
54
+ "AI Tutor:"
55
+ ),
56
+ },
57
+ "prompt_no_history": (
58
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
59
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
60
+ "Provide links from the source_file metadata. Use the source context that is most relevant. "
61
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
62
+ "Context:\n{context}\n\n"
63
+ "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
64
+ "Student: {input}\n"
65
+ "AI Tutor:"
66
+ ),
67
+ },
68
+ "tiny_llama": {
69
+ "prompt_no_history": (
70
+ "system\n"
71
+ "Assistant is an intelligent chatbot designed to help students with questions regarding the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance.\n"
72
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally.\n"
73
+ "Provide links from the source_file metadata. Use the source context that is most relevant.\n"
74
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
75
+ "\n\n"
76
+ "user\n"
77
+ "Context:\n{context}\n\n"
78
+ "Question: {input}\n"
79
+ "\n\n"
80
+ "assistant"
81
+ ),
82
+ "prompt_with_history": (
83
+ "system\n"
84
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
85
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
86
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
87
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
88
+ "\n\n"
89
+ "user\n"
90
+ "Chat History:\n{chat_history}\n\n"
91
+ "Context:\n{context}\n\n"
92
+ "Question: {input}\n"
93
+ "\n\n"
94
+ "assistant"
95
+ ),
96
+ },
97
+ }
apps/ai_tutor/encrypt_students.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import hashlib
3
+ import json
4
+ import argparse
5
+ from dotenv import load_dotenv
6
+
7
+
8
+ # Function to deterministically hash emails
9
+ def deterministic_hash(email, salt):
10
+ return hashlib.pbkdf2_hmac("sha256", email.encode(), salt, 100000).hex()
11
+
12
+
13
+ def main(args):
14
+ # Load the .env file
15
+ load_dotenv()
16
+
17
+ # Get the encryption key (salt)
18
+ encryption_salt = os.getenv("EMAIL_ENCRYPTION_KEY").encode()
19
+
20
+ # Load emails from the specified JSON file
21
+ with open(args.students_file, "r") as file:
22
+ emails = json.load(file)
23
+
24
+ # Replace emails with deterministic hashed emails, {hashed_email: [roles]}
25
+ hashed_emails = {
26
+ deterministic_hash(email, encryption_salt): roles
27
+ for email, roles in emails.items()
28
+ }
29
+
30
+ # Save hashed emails to the specified encrypted JSON file
31
+ with open(args.encrypted_students_file, "w") as file:
32
+ json.dump(hashed_emails, file)
33
+
34
+
35
+ if __name__ == "__main__":
36
+ parser = argparse.ArgumentParser(
37
+ description="Encrypt student emails in a JSON file."
38
+ )
39
+ parser.add_argument(
40
+ "--students-file",
41
+ type=str,
42
+ default="private/students.json",
43
+ help="Path to the students JSON file",
44
+ )
45
+ parser.add_argument(
46
+ "--encrypted-students-file",
47
+ type=str,
48
+ default="public/files/students_encrypted.json",
49
+ help="Path to save the encrypted students JSON file",
50
+ )
51
+ args = parser.parse_args()
52
+
53
+ main(args)
apps/ai_tutor/helpers.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime, timedelta, timezone
2
+ import tiktoken
3
+ from edubotics_core.chat_processor.helpers import update_user_info, convert_to_dict
4
+
5
+
6
+ def get_time():
7
+ return datetime.now(timezone.utc).isoformat()
8
+
9
+
10
+ async def check_user_cooldown(
11
+ user_info, current_time, COOLDOWN_TIME, TOKENS_LEFT, REGEN_TIME
12
+ ):
13
+ # # Check if no tokens left
14
+ tokens_left = user_info.metadata.get("tokens_left", 0)
15
+ if tokens_left > 0 and not user_info.metadata.get("in_cooldown", False):
16
+ return False, None
17
+
18
+ user_info = convert_to_dict(user_info)
19
+ last_message_time_str = user_info["metadata"].get("last_message_time")
20
+
21
+ # Convert from ISO format string to datetime object and ensure UTC timezone
22
+ last_message_time = datetime.fromisoformat(last_message_time_str).replace(
23
+ tzinfo=timezone.utc
24
+ )
25
+ current_time = datetime.fromisoformat(current_time).replace(tzinfo=timezone.utc)
26
+
27
+ # Calculate the elapsed time
28
+ elapsed_time = current_time - last_message_time
29
+ elapsed_time_in_seconds = elapsed_time.total_seconds()
30
+
31
+ # Calculate when the cooldown period ends
32
+ cooldown_end_time = last_message_time + timedelta(seconds=COOLDOWN_TIME)
33
+ cooldown_end_time_iso = cooldown_end_time.isoformat()
34
+
35
+ # Check if the user is still in cooldown
36
+ if elapsed_time_in_seconds < COOLDOWN_TIME:
37
+ return True, cooldown_end_time_iso # Return in ISO 8601 format
38
+
39
+ user_info["metadata"]["in_cooldown"] = False
40
+ # If not in cooldown, regenerate tokens
41
+ await reset_tokens_for_user(user_info, TOKENS_LEFT, REGEN_TIME)
42
+
43
+ return False, None
44
+
45
+
46
+ async def reset_tokens_for_user(user_info, TOKENS_LEFT, REGEN_TIME):
47
+ user_info = convert_to_dict(user_info)
48
+ last_message_time_str = user_info["metadata"].get("last_message_time")
49
+
50
+ last_message_time = datetime.fromisoformat(last_message_time_str).replace(
51
+ tzinfo=timezone.utc
52
+ )
53
+ current_time = datetime.fromisoformat(get_time()).replace(tzinfo=timezone.utc)
54
+
55
+ # Calculate the elapsed time since the last message
56
+ elapsed_time_in_seconds = (current_time - last_message_time).total_seconds()
57
+
58
+ # Current token count (can be negative)
59
+ current_tokens = user_info["metadata"].get("tokens_left_at_last_message", 0)
60
+ current_tokens = min(current_tokens, TOKENS_LEFT)
61
+
62
+ # Maximum tokens that can be regenerated
63
+ max_tokens = user_info["metadata"].get("max_tokens", TOKENS_LEFT)
64
+
65
+ # Calculate how many tokens should have been regenerated proportionally
66
+ if current_tokens < max_tokens:
67
+ # Calculate the regeneration rate per second based on REGEN_TIME for full regeneration
68
+ # If current_tokens is close to 0, then the regeneration rate is relatively high, and if current_tokens is close to max_tokens, then the regeneration rate is relatively low
69
+ regeneration_rate_per_second = (
70
+ max_tokens - max(current_tokens, 0)
71
+ ) / REGEN_TIME
72
+
73
+ # Calculate how many tokens should have been regenerated based on the elapsed time
74
+ tokens_to_regenerate = int(
75
+ elapsed_time_in_seconds * regeneration_rate_per_second
76
+ )
77
+
78
+ # Ensure the new token count does not exceed max_tokens
79
+ new_token_count = min(current_tokens + tokens_to_regenerate, max_tokens)
80
+
81
+ # Update the user's token count
82
+ user_info["metadata"]["tokens_left"] = new_token_count
83
+
84
+ await update_user_info(user_info)
85
+
86
+
87
+ def get_num_tokens(text, model):
88
+ encoding = tiktoken.encoding_for_model(model)
89
+ tokens = encoding.encode(text)
90
+ return len(tokens)
apps/ai_tutor/private/placeholder_students_file.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "[email protected]": ["student", "bu"],
3
+ "[email protected]": ["student", "bu"],
4
+ "[email protected]": ["admin", "instructor", "bu"]
5
+ }
apps/ai_tutor/public/assets/images/avatars/ai-tutor.png ADDED
apps/ai_tutor/public/assets/images/avatars/ai_tutor.png ADDED
apps/ai_tutor/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg ADDED
apps/ai_tutor/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg ADDED
apps/ai_tutor/public/assets/images/starter_icons/alarmy-svgrepo-com.svg ADDED
apps/ai_tutor/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg ADDED
apps/ai_tutor/public/files/students_encrypted.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"0645db6f7b415e3b04a4fc327151c3c7bbcd25ec546ee0b3604957b571a79bc2": ["instructor", "bu"], "51ebf87ac51618300acfef8bfa9768fdee40e2d3f39cfb4ae8a76722ee336de4": ["admin", "instructor", "bu"], "7810b25bef84317130e2a59da978ee716bb96f6a8a9296c051b7ad4108aa8e6a": ["instructor", "bu"], "a95f36e2700c554639d3522834b47733f5ed1f05c5a43d04ac2575571dd43563": ["student", "bu"]}
apps/ai_tutor/public/files/test.css ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ a[href*='https://github.com/Chainlit/chainlit'] {
2
+ visibility: hidden;
3
+ }
4
+
5
+ /* Hide the default avatar image */
6
+ .MuiAvatar-root img.MuiAvatar-img {
7
+ display: none;
8
+ }
9
+
10
+ /* Target the container of the image and set a custom background image */
11
+ .MuiAvatar-root.MuiAvatar-circular.css-m2icte {
12
+ background-image: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with your custom image URL */
13
+ background-size: cover; /* Ensure the image covers the entire container */
14
+ background-position: center; /* Center the image */
15
+ width: 100px; /* Ensure the dimensions match the original */
16
+ height: 100px; /* Ensure the dimensions match the original */
17
+ border-radius: 50%; /* Maintain circular shape */
18
+ }
19
+ .MuiAvatar-root.MuiAvatar-circular.css-v72an7 {
20
+ background-image: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with your custom image URL */
21
+ background-size: cover; /* Ensure the image covers the entire container */
22
+ background-position: center; /* Center the image */
23
+ width: 40px; /* Ensure the dimensions match the original */
24
+ height: 40px; /* Ensure the dimensions match the original */
25
+ border-radius: 50%; /* Maintain circular shape */
26
+ }
27
+
28
+ .MuiStack-root.css-14k6mw7 img {
29
+ content: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with the path to your custom image */
30
+ max-height: 45px; /* Ensure the height remains consistent */
31
+ max-width: 45px; /* Ensure the width remains consistent */
32
+ }
apps/ai_tutor/public/logo_dark.png ADDED
apps/ai_tutor/public/logo_light.png ADDED
apps/ai_tutor/storage/data/urls.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://dl4ds.github.io/sp2024/
apps/ai_tutor/templates/cooldown.html ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Cooldown Period | Terrier Tutor</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7;
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png');
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 50px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .avatar {
39
+ width: 90px;
40
+ height: 90px;
41
+ border-radius: 50%;
42
+ margin-bottom: 25px;
43
+ border: 2px solid #ddd;
44
+ }
45
+
46
+ .container h1 {
47
+ margin-bottom: 15px;
48
+ font-size: 24px;
49
+ font-weight: 600;
50
+ color: #1a1a1a;
51
+ }
52
+
53
+ .container p {
54
+ font-size: 16px;
55
+ color: #4a4a4a;
56
+ margin-bottom: 30px;
57
+ line-height: 1.5;
58
+ }
59
+
60
+ .cooldown-message {
61
+ font-size: 16px;
62
+ color: #333;
63
+ margin-bottom: 30px;
64
+ }
65
+
66
+ .tokens-left {
67
+ font-size: 14px;
68
+ color: #333;
69
+ margin-bottom: 30px;
70
+ font-weight: 600;
71
+ }
72
+
73
+ .button {
74
+ padding: 12px 0;
75
+ margin: 12px 0;
76
+ font-size: 14px;
77
+ border-radius: 6px;
78
+ cursor: pointer;
79
+ width: 100%;
80
+ border: 1px solid #4285F4;
81
+ background-color: #fff;
82
+ color: #4285F4;
83
+ transition: background-color 0.3s ease, border-color 0.3s ease;
84
+ display: none;
85
+ }
86
+
87
+ .button.start-tutor {
88
+ display: none;
89
+ }
90
+
91
+ .button:hover {
92
+ background-color: #e0e0e0;
93
+ border-color: #357ae8;
94
+ }
95
+
96
+ .sign-out-button {
97
+ border: 1px solid #FF4C4C;
98
+ background-color: #fff;
99
+ color: #FF4C4C;
100
+ display: block;
101
+ }
102
+
103
+ .sign-out-button:hover {
104
+ background-color: #ffe6e6;
105
+ border-color: #e04343;
106
+ color: #e04343;
107
+ }
108
+
109
+ #countdown {
110
+ font-size: 14px;
111
+ color: #555;
112
+ margin-bottom: 20px;
113
+ }
114
+
115
+ .footer {
116
+ font-size: 12px;
117
+ color: #777;
118
+ margin-top: 20px;
119
+ }
120
+ </style>
121
+ </head>
122
+ <body>
123
+ <div class="container">
124
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
125
+ <h1>Hello, {{ username }}</h1>
126
+ <p>It seems like you need to wait a bit before starting a new session.</p>
127
+ <p class="cooldown-message">Time remaining until the cooldown period ends:</p>
128
+ <p id="countdown"></p>
129
+ <p class="tokens-left">Tokens Left: <span id="tokensLeft">{{ tokens_left }}</span></p>
130
+ <button id="startTutorBtn" class="button start-tutor" onclick="startTutor()">Start AI Tutor</button>
131
+ <form action="/logout" method="get">
132
+ <button type="submit" class="button sign-out-button">Sign Out</button>
133
+ </form>
134
+ <div class="footer">Reload the page to update token stats</div>
135
+ </div>
136
+ <script>
137
+ function startCountdown(endTime) {
138
+ const countdownElement = document.getElementById('countdown');
139
+ const startTutorBtn = document.getElementById('startTutorBtn');
140
+ const endTimeDate = new Date(endTime);
141
+
142
+ function updateCountdown() {
143
+ const now = new Date();
144
+ const timeLeft = endTimeDate.getTime() - now.getTime();
145
+
146
+ if (timeLeft <= 0) {
147
+ countdownElement.textContent = "Cooldown period has ended.";
148
+ startTutorBtn.style.display = "block";
149
+ } else {
150
+ const hours = Math.floor(timeLeft / 1000 / 60 / 60);
151
+ const minutes = Math.floor((timeLeft / 1000 / 60) % 60);
152
+ const seconds = Math.floor((timeLeft / 1000) % 60);
153
+ countdownElement.textContent = `${hours}h ${minutes}m ${seconds}s`;
154
+ }
155
+ }
156
+
157
+ updateCountdown();
158
+ setInterval(updateCountdown, 1000);
159
+ }
160
+
161
+ function startTutor() {
162
+ window.location.href = "/start-tutor";
163
+ }
164
+
165
+ function updateTokensLeft() {
166
+ fetch('/get-tokens-left')
167
+ .then(response => response.json())
168
+ .then(data => {
169
+ document.getElementById('tokensLeft').textContent = data.tokens_left;
170
+ })
171
+ .catch(error => console.error('Error fetching tokens:', error));
172
+ }
173
+
174
+ // Start the countdown
175
+ startCountdown("{{ cooldown_end_time }}");
176
+
177
+ // Update tokens left when the page loads
178
+ updateTokensLeft();
179
+ </script>
180
+ </body>
181
+ </html>
apps/ai_tutor/templates/dashboard.html ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Dashboard | Terrier Tutor</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7; /* Light gray background */
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png'); /* Subtle geometric pattern */
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 40px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .avatar {
39
+ width: 90px;
40
+ height: 90px;
41
+ border-radius: 50%;
42
+ margin-bottom: 20px;
43
+ border: 2px solid #ddd;
44
+ }
45
+
46
+ .container h1 {
47
+ margin-bottom: 20px;
48
+ font-size: 26px;
49
+ font-weight: 600;
50
+ color: #1a1a1a;
51
+ }
52
+
53
+ .container p {
54
+ font-size: 15px;
55
+ color: #4a4a4a;
56
+ margin-bottom: 25px;
57
+ line-height: 1.5;
58
+ }
59
+
60
+ .tokens-left {
61
+ font-size: 17px;
62
+ color: #333;
63
+ margin-bottom: 10px;
64
+ font-weight: 600;
65
+ }
66
+
67
+ .all-time-tokens {
68
+ font-size: 14px; /* Reduced font size */
69
+ color: #555;
70
+ margin-bottom: 30px;
71
+ font-weight: 500;
72
+ white-space: nowrap; /* Prevents breaking to a new line */
73
+ }
74
+
75
+ .button {
76
+ padding: 12px 0;
77
+ margin: 12px 0;
78
+ font-size: 15px;
79
+ border-radius: 6px;
80
+ cursor: pointer;
81
+ width: 100%;
82
+ border: 1px solid #4285F4; /* Button border color */
83
+ background-color: #fff; /* Button background color */
84
+ color: #4285F4; /* Button text color */
85
+ transition: background-color 0.3s ease, border-color 0.3s ease;
86
+ }
87
+
88
+ .button:hover {
89
+ background-color: #e0e0e0;
90
+ border-color: #357ae8; /* Darker blue for hover */
91
+ }
92
+
93
+ .start-button {
94
+ border: 1px solid #4285F4;
95
+ color: #4285F4;
96
+ background-color: #fff;
97
+ }
98
+
99
+ .start-button:hover {
100
+ background-color: #e0f0ff; /* Light blue on hover */
101
+ border-color: #357ae8; /* Darker blue for hover */
102
+ color: #357ae8; /* Blue text on hover */
103
+ }
104
+
105
+ .sign-out-button {
106
+ border: 1px solid #FF4C4C;
107
+ background-color: #fff;
108
+ color: #FF4C4C;
109
+ }
110
+
111
+ .sign-out-button:hover {
112
+ background-color: #ffe6e6; /* Light red on hover */
113
+ border-color: #e04343; /* Darker red for hover */
114
+ color: #e04343; /* Red text on hover */
115
+ }
116
+
117
+ .footer {
118
+ font-size: 12px;
119
+ color: #777;
120
+ margin-top: 25px;
121
+ }
122
+ </style>
123
+ </head>
124
+ <body>
125
+ <div class="container">
126
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
127
+ <h1>Welcome, {{ username }}</h1>
128
+ <p>Ready to start your AI tutoring session?</p>
129
+ <p class="tokens-left">Tokens Left: {{ tokens_left }}</p>
130
+ <p class="all-time-tokens">All-Time Tokens Allocated: {{ all_time_tokens_allocated }} / {{ total_tokens_allocated }}</p>
131
+ <form action="/start-tutor" method="post">
132
+ <button type="submit" class="button start-button">Start AI Tutor</button>
133
+ </form>
134
+ <form action="/logout" method="get">
135
+ <button type="submit" class="button sign-out-button">Sign Out</button>
136
+ </form>
137
+ <div class="footer">Reload the page to update token stats</div>
138
+ </div>
139
+ <script>
140
+ let token = "{{ jwt_token }}";
141
+ console.log("Token: ", token);
142
+ localStorage.setItem('token', token);
143
+ </script>
144
+ </body>
145
+ </html>
apps/ai_tutor/templates/error.html ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Error | Terrier Tutor</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7; /* Light gray background */
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png'); /* Subtle geometric pattern */
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 50px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .container h1 {
39
+ margin-bottom: 20px;
40
+ font-size: 26px;
41
+ font-weight: 600;
42
+ color: #1a1a1a;
43
+ }
44
+
45
+ .container p {
46
+ font-size: 18px;
47
+ color: #4a4a4a;
48
+ margin-bottom: 35px;
49
+ line-height: 1.5;
50
+ }
51
+
52
+ .button {
53
+ padding: 14px 0;
54
+ margin: 12px 0;
55
+ font-size: 16px;
56
+ border-radius: 6px;
57
+ cursor: pointer;
58
+ width: 100%;
59
+ border: 1px solid #ccc;
60
+ background-color: #007BFF;
61
+ color: #fff;
62
+ transition: background-color 0.3s ease, border-color 0.3s ease;
63
+ }
64
+
65
+ .button:hover {
66
+ background-color: #0056b3;
67
+ border-color: #0056b3;
68
+ }
69
+
70
+ .error-box {
71
+ background-color: #2d2d2d;
72
+ color: #fff;
73
+ padding: 10px;
74
+ margin-top: 20px;
75
+ font-family: 'Courier New', Courier, monospace;
76
+ text-align: left;
77
+ overflow-x: auto;
78
+ white-space: pre-wrap;
79
+ border-radius: 5px;
80
+ }
81
+ </style>
82
+ </head>
83
+ <body>
84
+ <div class="container">
85
+ <h1>Oops! Something went wrong...</h1>
86
+ <p>An unexpected error occurred. The details are below:</p>
87
+ <div class="error-box">
88
+ <code>{{ error }}</code>
89
+ </div>
90
+ <form action="/" method="get">
91
+ <button type="submit" class="button">Return to Home</button>
92
+ </form>
93
+ </div>
94
+ </body>
95
+ </html>
apps/ai_tutor/templates/error_404.html ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>404 - Not Found</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7; /* Light gray background */
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png'); /* Subtle geometric pattern */
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 50px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .container h1 {
39
+ margin-bottom: 20px;
40
+ font-size: 26px;
41
+ font-weight: 600;
42
+ color: #1a1a1a;
43
+ }
44
+
45
+ .container p {
46
+ font-size: 18px;
47
+ color: #4a4a4a;
48
+ margin-bottom: 35px;
49
+ line-height: 1.5;
50
+ }
51
+
52
+ .button {
53
+ padding: 14px 0;
54
+ margin: 12px 0;
55
+ font-size: 16px;
56
+ border-radius: 6px;
57
+ cursor: pointer;
58
+ width: 100%;
59
+ border: 1px solid #ccc;
60
+ background-color: #007BFF;
61
+ color: #fff;
62
+ transition: background-color 0.3s ease, border-color 0.3s ease;
63
+ }
64
+
65
+ .button:hover {
66
+ background-color: #0056b3;
67
+ border-color: #0056b3;
68
+ }
69
+ </style>
70
+ </head>
71
+ <body>
72
+ <div class="container">
73
+ <h1>You have ventured into the abyss...</h1>
74
+ <p>To get back to reality, click the button below.</p>
75
+ <form action="/" method="get">
76
+ <button type="submit" class="button">Return to Home</button>
77
+ </form>
78
+ </div>
79
+ </body>
80
+ </html>
apps/ai_tutor/templates/login.html ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Login | Terrier Tutor</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7; /* Light gray background */
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png'); /* Subtle geometric pattern */
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 50px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .avatar {
39
+ width: 90px;
40
+ height: 90px;
41
+ border-radius: 50%;
42
+ margin-bottom: 25px;
43
+ border: 2px solid #ddd;
44
+ }
45
+
46
+ .container h1 {
47
+ margin-bottom: 15px;
48
+ font-size: 24px;
49
+ font-weight: 600;
50
+ color: #1a1a1a;
51
+ }
52
+
53
+ .container p {
54
+ font-size: 16px;
55
+ color: #4a4a4a;
56
+ margin-bottom: 30px;
57
+ line-height: 1.5;
58
+ }
59
+
60
+ .button {
61
+ padding: 12px 0;
62
+ margin: 12px 0;
63
+ font-size: 14px;
64
+ border-radius: 6px;
65
+ cursor: pointer;
66
+ width: 100%;
67
+ border: 1px solid #4285F4; /* Google button border color */
68
+ background-color: #fff; /* Guest button color */
69
+ color: #4285F4; /* Google button text color */
70
+ transition: background-color 0.3s ease, border-color 0.3s ease;
71
+ }
72
+
73
+ .button:hover {
74
+ background-color: #e0f0ff; /* Light blue on hover */
75
+ border-color: #357ae8; /* Darker blue for hover */
76
+ color: #357ae8; /* Blue text on hover */
77
+ }
78
+
79
+ .footer {
80
+ margin-top: 40px;
81
+ font-size: 15px;
82
+ color: #666;
83
+ text-align: center; /* Center the text in the footer */
84
+ }
85
+
86
+ .footer a {
87
+ color: #333;
88
+ text-decoration: none;
89
+ font-weight: 500;
90
+ display: inline-flex;
91
+ align-items: center;
92
+ justify-content: center; /* Center the content of the links */
93
+ transition: color 0.3s ease;
94
+ margin-bottom: 8px;
95
+ width: 100%; /* Make the link block level */
96
+ }
97
+
98
+ .footer a:hover {
99
+ color: #000;
100
+ }
101
+
102
+ .footer svg {
103
+ margin-right: 8px;
104
+ fill: currentColor;
105
+ }
106
+ </style>
107
+ </head>
108
+ <body>
109
+ <div class="container">
110
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
111
+ <h1>Terrier Tutor</h1>
112
+ <p>Welcome to the DS598 AI Tutor. Please sign in to continue.</p>
113
+ <form action="/login/google" method="get">
114
+ <button type="submit" class="button">Sign in with Google</button>
115
+ </form>
116
+ <div class="footer">
117
+ <a href="{{ GITHUB_REPO }}" target="_blank">
118
+ <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24">
119
+ <path d="M12 .5C5.596.5.5 5.596.5 12c0 5.098 3.292 9.414 7.852 10.94.574.105.775-.249.775-.553 0-.272-.01-1.008-.015-1.98-3.194.694-3.87-1.544-3.87-1.544-.521-1.324-1.273-1.676-1.273-1.676-1.04-.714.079-.7.079-.7 1.148.08 1.75 1.181 1.75 1.181 1.022 1.752 2.683 1.246 3.34.954.104-.74.4-1.246.73-1.533-2.551-.292-5.234-1.276-5.234-5.675 0-1.253.447-2.277 1.181-3.079-.12-.293-.51-1.47.113-3.063 0 0 .96-.307 3.15 1.174.913-.255 1.892-.383 2.867-.388.975.005 1.954.133 2.868.388 2.188-1.481 3.147-1.174 3.147-1.174.624 1.593.233 2.77.114 3.063.735.802 1.18 1.826 1.18 3.079 0 4.407-2.688 5.38-5.248 5.668.413.354.782 1.049.782 2.113 0 1.526-.014 2.757-.014 3.132 0 .307.198.662.783.553C20.21 21.411 23.5 17.096 23.5 12c0-6.404-5.096-11.5-11.5-11.5z"/>
120
+ </svg>
121
+ View on GitHub
122
+ </a>
123
+ <a href="{{ DOCS_WEBSITE }}" target="_blank">
124
+ <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24">
125
+ <path d="M19 2H8c-1.103 0-2 .897-2 2v16c0 1.103.897 2 2 2h12c1.103 0 2-.897 2-2V7l-5-5zm0 2l.001 4H14V4h5zm-1 14H9V4h4v6h6v8zM7 4H6v16c0 1.654 1.346 3 3 3h9v-2H9c-.551 0-1-.449-1-1V4z"/>
126
+ </svg>
127
+ View Docs
128
+ </a>
129
+ </div>
130
+ </div>
131
+ </body>
132
+ </html>
apps/ai_tutor/templates/logout.html ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html>
3
+ <head>
4
+ <title>Logout</title>
5
+ <script>
6
+ window.onload = function() {
7
+ fetch('/chainlit_tutor/logout', {
8
+ method: 'POST',
9
+ credentials: 'include' // Ensure cookies are sent
10
+ }).then(() => {
11
+ window.location.href = '/';
12
+ }).catch(error => {
13
+ console.error('Logout failed:', error);
14
+ });
15
+ };
16
+ </script>
17
+ </head>
18
+ <body>
19
+ <p>Logging out... If you are not redirected, <a href="/">click here</a>.</p>
20
+ </body>
21
+ </html>
apps/ai_tutor/templates/unauthorized.html ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Access Restricted</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7; /* Light gray background */
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png'); /* Subtle geometric pattern */
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 50px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .avatar {
39
+ width: 90px;
40
+ height: 90px;
41
+ border-radius: 50%;
42
+ margin-bottom: 25px;
43
+ border: 2px solid #ddd;
44
+ }
45
+
46
+ .container h1 {
47
+ margin-bottom: 20px;
48
+ font-size: 26px;
49
+ font-weight: 600;
50
+ color: #1a1a1a;
51
+ }
52
+
53
+ .container p {
54
+ font-size: 18px;
55
+ color: #4a4a4a;
56
+ margin-bottom: 35px;
57
+ line-height: 1.5;
58
+ }
59
+
60
+ .button {
61
+ padding: 14px 0;
62
+ margin: 12px 0;
63
+ font-size: 16px;
64
+ border-radius: 6px;
65
+ cursor: pointer;
66
+ width: 100%;
67
+ border: 1px solid #ccc;
68
+ background-color: #007BFF;
69
+ color: #fff;
70
+ transition: background-color 0.3s ease, border-color 0.3s ease;
71
+ }
72
+
73
+ .button:hover {
74
+ background-color: #0056b3;
75
+ border-color: #0056b3;
76
+ }
77
+ </style>
78
+ </head>
79
+ <body>
80
+ <div class="container">
81
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
82
+ <h1>Access Restricted</h1>
83
+ <p>
84
+ We're currently testing things out for the <strong>DS598</strong> course.
85
+ Access is restricted to students of the course. If you're enrolled in <strong>DS598</strong> and seeing this message,
86
+ please reach out to us, and we'll help you get access.<br><br>
87
+ <em>P.S. Don't forget to use your BU email when logging in!</em>
88
+ </p>
89
+ <form action="/" method="get">
90
+ <button type="submit" class="button">Return to Home</button>
91
+ </form>
92
+ </div>
93
+ </body>
94
+ </html>
apps/chainlit_base/.chainlit/config.toml ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+
6
+ # List of environment variables to be provided by each user to use the app.
7
+ user_env = []
8
+
9
+ # Duration (in seconds) during which the session is saved when the connection is lost
10
+ session_timeout = 3600
11
+
12
+ # Enable third parties caching (e.g LangChain cache)
13
+ cache = false
14
+
15
+ # Authorized origins
16
+ allow_origins = ["*"]
17
+
18
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
19
+ # follow_symlink = false
20
+
21
+ [features]
22
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
23
+ unsafe_allow_html = false
24
+
25
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
26
+ latex = false
27
+
28
+ # Automatically tag threads with the current chat profile (if a chat profile is used)
29
+ auto_tag_thread = true
30
+
31
+ # Authorize users to spontaneously upload files with messages
32
+ [features.spontaneous_file_upload]
33
+ enabled = true
34
+ accept = ["*/*"]
35
+ max_files = 20
36
+ max_size_mb = 500
37
+
38
+ [features.audio]
39
+ # Threshold for audio recording
40
+ min_decibels = -45
41
+ # Delay for the user to start speaking in MS
42
+ initial_silence_timeout = 3000
43
+ # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
44
+ silence_timeout = 1500
45
+ # Above this duration (MS), the recording will forcefully stop.
46
+ max_duration = 15000
47
+ # Duration of the audio chunks in MS
48
+ chunk_duration = 1000
49
+ # Sample rate of the audio
50
+ sample_rate = 44100
51
+
52
+ edit_message = true
53
+
54
+ [UI]
55
+ # Name of the assistant.
56
+ name = "Assistant"
57
+
58
+ # Description of the assistant. This is used for HTML tags.
59
+ # description = ""
60
+
61
+ # Large size content are by default collapsed for a cleaner ui
62
+ default_collapse_content = true
63
+
64
+ # Chain of Thought (CoT) display mode. Can be "hidden", "tool_call" or "full".
65
+ cot = "full"
66
+
67
+ # Link to your github repo. This will add a github button in the UI's header.
68
+ # github = ""
69
+
70
+ # Specify a CSS file that can be used to customize the user interface.
71
+ # The CSS file can be served from the public directory or via an external link.
72
+ custom_css = "/public/files/test.css"
73
+
74
+ # Specify a Javascript file that can be used to customize the user interface.
75
+ # The Javascript file can be served from the public directory.
76
+ # custom_js = "/public/test.js"
77
+
78
+ # Specify a custom font url.
79
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
80
+
81
+ # Specify a custom meta image url.
82
+ # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"
83
+
84
+ # Specify a custom build directory for the frontend.
85
+ # This can be used to customize the frontend code.
86
+ # Be careful: If this is a relative path, it should not start with a slash.
87
+ # custom_build = "./public/build"
88
+
89
+ [UI.theme]
90
+ default = "dark"
91
+ #layout = "wide"
92
+ #font_family = "Inter, sans-serif"
93
+ # Override default MUI light theme. (Check theme.ts)
94
+ [UI.theme.light]
95
+ #background = "#FAFAFA"
96
+ #paper = "#FFFFFF"
97
+
98
+ [UI.theme.light.primary]
99
+ #main = "#F80061"
100
+ #dark = "#980039"
101
+ #light = "#FFE7EB"
102
+ [UI.theme.light.text]
103
+ #primary = "#212121"
104
+ #secondary = "#616161"
105
+
106
+ # Override default MUI dark theme. (Check theme.ts)
107
+ [UI.theme.dark]
108
+ #background = "#FAFAFA"
109
+ #paper = "#FFFFFF"
110
+
111
+ [UI.theme.dark.primary]
112
+ #main = "#F80061"
113
+ #dark = "#980039"
114
+ #light = "#FFE7EB"
115
+ [UI.theme.dark.text]
116
+ #primary = "#EEEEEE"
117
+ #secondary = "#BDBDBD"
118
+
119
+ [meta]
120
+ generated_by = "1.1.402"
apps/chainlit_base/chainlit.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! 🚀🤖
2
+
3
+ Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4
+
5
+ ## Useful Links 🔗
6
+
7
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
8
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
9
+
10
+ We can't wait to see what you create with Chainlit! Happy coding! 💻😊
11
+
12
+ ## Welcome screen
13
+
14
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
apps/chainlit_base/chainlit_base.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import chainlit.data as cl_data
2
+ import asyncio
3
+ from typing import Any, Dict, no_type_check
4
+ import chainlit as cl
5
+ from edubotics_core.chat.llm_tutor import LLMTutor
6
+ from edubotics_core.chat.helpers import (
7
+ get_sources,
8
+ get_history_setup_llm,
9
+ )
10
+ import copy
11
+ from langchain_community.callbacks import get_openai_callback
12
+ from config.config_manager import config_manager
13
+
14
+ USER_TIMEOUT = 60_000
15
+ SYSTEM = "System"
16
+ LLM = "AI Tutor"
17
+ AGENT = "Agent"
18
+ YOU = "User"
19
+ ERROR = "Error"
20
+
21
+ config = config_manager.get_config().dict()
22
+
23
+
24
+ class Chatbot:
25
+ def __init__(self, config):
26
+ """
27
+ Initialize the Chatbot class.
28
+ """
29
+ self.config = config
30
+
31
+ @no_type_check
32
+ async def setup_llm(self):
33
+ """
34
+ Set up the LLM with the provided settings. Update the configuration and initialize the LLM tutor.
35
+
36
+ #TODO: Clean this up.
37
+ """
38
+
39
+ llm_settings = cl.user_session.get("llm_settings", {})
40
+ (
41
+ chat_profile,
42
+ retriever_method,
43
+ memory_window,
44
+ llm_style,
45
+ generate_follow_up,
46
+ chunking_mode,
47
+ ) = (
48
+ llm_settings.get("chat_model"),
49
+ llm_settings.get("retriever_method"),
50
+ llm_settings.get("memory_window"),
51
+ llm_settings.get("llm_style"),
52
+ llm_settings.get("follow_up_questions"),
53
+ llm_settings.get("chunking_mode"),
54
+ )
55
+
56
+ chain = cl.user_session.get("chain")
57
+ memory_list = cl.user_session.get(
58
+ "memory",
59
+ (
60
+ list(chain.store.values())[0].messages
61
+ if len(chain.store.values()) > 0
62
+ else []
63
+ ),
64
+ )
65
+ conversation_list = get_history_setup_llm(memory_list)
66
+
67
+ old_config = copy.deepcopy(self.config)
68
+ self.config["vectorstore"]["db_option"] = retriever_method
69
+ self.config["llm_params"]["memory_window"] = memory_window
70
+ self.config["llm_params"]["llm_style"] = llm_style
71
+ self.config["llm_params"]["llm_loader"] = chat_profile
72
+ self.config["llm_params"]["generate_follow_up"] = generate_follow_up
73
+ self.config["splitter_options"]["chunking_mode"] = chunking_mode
74
+
75
+ self.llm_tutor.update_llm(
76
+ old_config, self.config
77
+ ) # update only llm attributes that are changed
78
+ self.chain = self.llm_tutor.qa_bot(
79
+ memory=conversation_list,
80
+ )
81
+
82
+ cl.user_session.set("chain", self.chain)
83
+ cl.user_session.set("llm_tutor", self.llm_tutor)
84
+
85
+ @no_type_check
86
+ async def update_llm(self, new_settings: Dict[str, Any]):
87
+ """
88
+ Update the LLM settings and reinitialize the LLM with the new settings.
89
+
90
+ Args:
91
+ new_settings (Dict[str, Any]): The new settings to update.
92
+ """
93
+ cl.user_session.set("llm_settings", new_settings)
94
+ await self.inform_llm_settings()
95
+ await self.setup_llm()
96
+
97
+ async def make_llm_settings_widgets(self, config=None):
98
+ """
99
+ Create and send the widgets for LLM settings configuration.
100
+
101
+ Args:
102
+ config: The configuration to use for setting up the widgets.
103
+ """
104
+ config = config or self.config
105
+ await cl.ChatSettings(
106
+ [
107
+ cl.input_widget.Select(
108
+ id="chat_model",
109
+ label="Model Name (Default GPT-3)",
110
+ values=["local_llm", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4o-mini"],
111
+ initial_index=[
112
+ "local_llm",
113
+ "gpt-3.5-turbo-1106",
114
+ "gpt-4",
115
+ "gpt-4o-mini",
116
+ ].index(config["llm_params"]["llm_loader"]),
117
+ ),
118
+ cl.input_widget.Select(
119
+ id="retriever_method",
120
+ label="Retriever (Default FAISS)",
121
+ values=["FAISS", "Chroma", "RAGatouille", "RAPTOR"],
122
+ initial_index=["FAISS", "Chroma", "RAGatouille", "RAPTOR"].index(
123
+ config["vectorstore"]["db_option"]
124
+ ),
125
+ ),
126
+ cl.input_widget.Slider(
127
+ id="memory_window",
128
+ label="Memory Window (Default 3)",
129
+ initial=3,
130
+ min=0,
131
+ max=10,
132
+ step=1,
133
+ ),
134
+ cl.input_widget.Switch(
135
+ id="view_sources", label="View Sources", initial=False
136
+ ),
137
+ cl.input_widget.Switch(
138
+ id="stream_response",
139
+ label="Stream response",
140
+ initial=config["llm_params"]["stream"],
141
+ ),
142
+ cl.input_widget.Select(
143
+ id="chunking_mode",
144
+ label="Chunking mode",
145
+ values=["fixed", "semantic"],
146
+ initial_index=1,
147
+ ),
148
+ cl.input_widget.Switch(
149
+ id="follow_up_questions",
150
+ label="Generate follow up questions",
151
+ initial=False,
152
+ ),
153
+ cl.input_widget.Select(
154
+ id="llm_style",
155
+ label="Type of Conversation (Default Normal)",
156
+ values=["Normal", "ELI5"],
157
+ initial_index=0,
158
+ ),
159
+ ]
160
+ ).send()
161
+
162
+ @no_type_check
163
+ async def inform_llm_settings(self):
164
+ """
165
+ Inform the user about the updated LLM settings and display them as a message.
166
+ """
167
+ await cl.Message(
168
+ author=SYSTEM,
169
+ content="LLM settings have been updated. You can continue with your Query!",
170
+ ).send()
171
+
172
+ async def set_starters(self):
173
+ """
174
+ Set starter messages for the chatbot.
175
+ """
176
+
177
+ return [
178
+ cl.Starter(
179
+ label="recording on Transformers?",
180
+ message="Where can I find the recording for the lecture on Transformers?",
181
+ icon="/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg",
182
+ ),
183
+ cl.Starter(
184
+ label="where's the slides?",
185
+ message="When are the lectures? I can't find the schedule.",
186
+ icon="/public/assets/images/starter_icons/alarmy-svgrepo-com.svg",
187
+ ),
188
+ cl.Starter(
189
+ label="Due Date?",
190
+ message="When is the final project due?",
191
+ icon="/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg",
192
+ ),
193
+ cl.Starter(
194
+ label="Explain backprop.",
195
+ message="I didn't understand the math behind backprop, could you explain it?",
196
+ icon="/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg",
197
+ ),
198
+ ]
199
+
200
+ def rename(self, orig_author: str):
201
+ """
202
+ Rename the original author to a more user-friendly name.
203
+
204
+ Args:
205
+ orig_author (str): The original author's name.
206
+
207
+ Returns:
208
+ str: The renamed author.
209
+ """
210
+ rename_dict = {"Chatbot": LLM}
211
+ return rename_dict.get(orig_author, orig_author)
212
+
213
+ async def start(self):
214
+ """
215
+ Start the chatbot, initialize settings widgets,
216
+ and display and load previous conversation if chat logging is enabled.
217
+ """
218
+
219
+ await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
220
+
221
+ # TODO: remove self.user with cl.user_session.get("user")
222
+ self.user = {
223
+ "user_id": "guest",
224
+ "session_id": cl.context.session.thread_id,
225
+ }
226
+
227
+ memory = cl.user_session.get("memory", [])
228
+ self.llm_tutor = LLMTutor(self.config, user=self.user)
229
+
230
+ self.chain = self.llm_tutor.qa_bot(
231
+ memory=memory,
232
+ )
233
+ self.question_generator = self.llm_tutor.question_generator
234
+ cl.user_session.set("llm_tutor", self.llm_tutor)
235
+ cl.user_session.set("chain", self.chain)
236
+
237
+ async def stream_response(self, response):
238
+ """
239
+ Stream the response from the LLM.
240
+
241
+ Args:
242
+ response: The response from the LLM.
243
+ """
244
+ msg = cl.Message(content="")
245
+ await msg.send()
246
+
247
+ output = {}
248
+ for chunk in response:
249
+ if "answer" in chunk:
250
+ await msg.stream_token(chunk["answer"])
251
+
252
+ for key in chunk:
253
+ if key not in output:
254
+ output[key] = chunk[key]
255
+ else:
256
+ output[key] += chunk[key]
257
+ return output
258
+
259
+ async def main(self, message):
260
+ """
261
+ Process and Display the Conversation.
262
+
263
+ Args:
264
+ message: The incoming chat message.
265
+ """
266
+
267
+ chain = cl.user_session.get("chain")
268
+ token_count = 0 # initialize token count
269
+ if not chain:
270
+ await self.start() # start the chatbot if the chain is not present
271
+ chain = cl.user_session.get("chain")
272
+
273
+ # update user info with last message time
274
+ llm_settings = cl.user_session.get("llm_settings", {})
275
+ view_sources = llm_settings.get("view_sources", False)
276
+ stream = llm_settings.get("stream_response", False)
277
+ stream = False # Fix streaming
278
+ user_query_dict = {"input": message.content}
279
+ # Define the base configuration
280
+ cb = cl.AsyncLangchainCallbackHandler()
281
+ chain_config = {
282
+ "configurable": {
283
+ "user_id": self.user["user_id"],
284
+ "conversation_id": self.user["session_id"],
285
+ "memory_window": self.config["llm_params"]["memory_window"],
286
+ },
287
+ "callbacks": (
288
+ [cb]
289
+ if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
290
+ else None
291
+ ),
292
+ }
293
+
294
+ with get_openai_callback() as token_count_cb:
295
+ if stream:
296
+ res = chain.stream(user_query=user_query_dict, config=chain_config)
297
+ res = await self.stream_response(res)
298
+ else:
299
+ res = await chain.invoke(
300
+ user_query=user_query_dict,
301
+ config=chain_config,
302
+ )
303
+ token_count += token_count_cb.total_tokens
304
+
305
+ answer = res.get("answer", res.get("result"))
306
+
307
+ answer_with_sources, source_elements, sources_dict = get_sources(
308
+ res, answer, stream=stream, view_sources=view_sources
309
+ )
310
+ answer_with_sources = answer_with_sources.replace("$$", "$")
311
+
312
+ actions = []
313
+
314
+ if self.config["llm_params"]["generate_follow_up"]:
315
+ cb_follow_up = cl.AsyncLangchainCallbackHandler()
316
+ config = {
317
+ "callbacks": (
318
+ [cb_follow_up]
319
+ if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
320
+ else None
321
+ )
322
+ }
323
+ with get_openai_callback() as token_count_cb:
324
+ list_of_questions = await self.question_generator.generate_questions(
325
+ query=user_query_dict["input"],
326
+ response=answer,
327
+ chat_history=res.get("chat_history"),
328
+ context=res.get("context"),
329
+ config=config,
330
+ )
331
+
332
+ token_count += token_count_cb.total_tokens
333
+
334
+ for question in list_of_questions:
335
+ actions.append(
336
+ cl.Action(
337
+ name="follow up question",
338
+ value="example_value",
339
+ description=question,
340
+ label=question,
341
+ )
342
+ )
343
+
344
+ await cl.Message(
345
+ content=answer_with_sources,
346
+ elements=source_elements,
347
+ author=LLM,
348
+ actions=actions,
349
+ ).send()
350
+
351
+ async def on_follow_up(self, action: cl.Action):
352
+ user = cl.user_session.get("user")
353
+ message = await cl.Message(
354
+ content=action.description,
355
+ type="user_message",
356
+ author=user.identifier,
357
+ ).send()
358
+ async with cl.Step(
359
+ name="on_follow_up", type="run", parent_id=message.id
360
+ ) as step:
361
+ await self.main(message)
362
+ step.output = message.content
363
+
364
+
365
+ chatbot = Chatbot(config=config)
366
+
367
+
368
+ async def start_app():
369
+ cl.set_starters(chatbot.set_starters)
370
+ cl.author_rename(chatbot.rename)
371
+ cl.on_chat_start(chatbot.start)
372
+ cl.on_message(chatbot.main)
373
+ cl.on_settings_update(chatbot.update_llm)
374
+ cl.action_callback("follow up question")(chatbot.on_follow_up)
375
+
376
+
377
+ loop = asyncio.get_event_loop()
378
+ if loop.is_running():
379
+ asyncio.ensure_future(start_app())
380
+ else:
381
+ asyncio.run(start_app())
apps/chainlit_base/config/config.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: 'storage/logs' # str
2
+ log_chunk_dir: 'storage/logs/chunks' # str
3
+ device: 'cpu' # str [cuda, cpu]
4
+
5
+ vectorstore:
6
+ load_from_HF: True # bool
7
+ reparse_files: True # bool
8
+ data_path: 'storage/data' # str
9
+ url_file_path: 'storage/data/urls.txt' # str
10
+ expand_urls: True # bool
11
+ db_option : 'RAGatouille' # str [FAISS, Chroma, RAGatouille, RAPTOR]
12
+ db_path : 'vectorstores' # str
13
+ model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
14
+ search_top_k : 3 # int
15
+ score_threshold : 0.2 # float
16
+
17
+ faiss_params: # Not used as of now
18
+ index_path: 'vectorstores/faiss.index' # str
19
+ index_type: 'Flat' # str [Flat, HNSW, IVF]
20
+ index_dimension: 384 # int
21
+ index_nlist: 100 # int
22
+ index_nprobe: 10 # int
23
+
24
+ colbert_params:
25
+ index_name: "new_idx" # str
26
+
27
+ llm_params:
28
+ llm_arch: 'langchain' # [langchain]
29
+ use_history: True # bool
30
+ generate_follow_up: False # bool
31
+ memory_window: 3 # int
32
+ llm_style: 'Normal' # str [Normal, ELI5]
33
+ llm_loader: 'gpt-4o-mini' # str [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
34
+ openai_params:
35
+ temperature: 0.7 # float
36
+ local_llm_params:
37
+ temperature: 0.7 # float
38
+ repo_id: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF' # HuggingFace repo id
39
+ filename: 'tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Specific name of gguf file in the repo
40
+ model_path: 'storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Path to the model file
41
+ stream: False # bool
42
+ pdf_reader: 'gpt' # str [llama, pymupdf, gpt]
43
+
44
+ chat_logging:
45
+ log_chat: True # bool
46
+ platform: 'literalai'
47
+ callbacks: True # bool
48
+
49
+ splitter_options:
50
+ use_splitter: True # bool
51
+ split_by_token : True # bool
52
+ remove_leftover_delimiters: True # bool
53
+ remove_chunks: False # bool
54
+ chunking_mode: 'semantic' # str [fixed, semantic]
55
+ chunk_size : 300 # int
56
+ chunk_overlap : 30 # int
57
+ chunk_separators : ["\n\n", "\n", " ", ""] # list of strings
58
+ front_chunks_to_remove : null # int or None
59
+ last_chunks_to_remove : null # int or None
60
+ delimiters_to_remove : ['\t', '\n', ' ', ' '] # list of strings
apps/chainlit_base/config/config_manager.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel, conint, confloat, HttpUrl
from typing import Optional, List
import yaml


class FaissParams(BaseModel):
    # FAISS index settings. Not used as of now; kept for future support.
    index_path: str = "vectorstores/faiss.index"
    index_type: str = "Flat"  # Options: [Flat, HNSW, IVF]
    index_dimension: conint(gt=0) = 384
    index_nlist: conint(gt=0) = 100
    index_nprobe: conint(gt=0) = 10


class ColbertParams(BaseModel):
    # RAGatouille / ColBERT index name.
    index_name: str = "new_idx"


class VectorStoreConfig(BaseModel):
    # Vector database selection plus embedding/retrieval settings.
    load_from_HF: bool = True
    reparse_files: bool = True
    data_path: str = "storage/data"
    url_file_path: str = "storage/data/urls.txt"
    expand_urls: bool = True
    db_option: str = "RAGatouille"  # Options: [FAISS, Chroma, RAGatouille, RAPTOR]
    db_path: str = "vectorstores"
    model: str = (
        "sentence-transformers/all-MiniLM-L6-v2"  # Options: [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002]
    )
    search_top_k: conint(gt=0) = 3
    score_threshold: confloat(ge=0.0, le=1.0) = 0.2

    # Backend-specific parameter groups; only the one matching db_option is used.
    faiss_params: Optional[FaissParams] = None
    colbert_params: Optional[ColbertParams] = None


class OpenAIParams(BaseModel):
    temperature: confloat(ge=0.0, le=1.0) = 0.7


class LocalLLMParams(BaseModel):
    temperature: confloat(ge=0.0, le=1.0) = 0.7
    repo_id: str = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"  # HuggingFace repo id
    filename: str = (
        "tinyllama-1.1b-chat-v1.0.Q5_0.gguf"  # Specific name of gguf file in the repo
    )
    model_path: str = (
        "storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf"  # Path to the model file
    )


class LLMParams(BaseModel):
    # LLM orchestration settings: which loader/backend, chat-history behavior,
    # and per-backend parameter groups.
    llm_arch: str = "langchain"  # Options: [langchain]
    use_history: bool = True
    generate_follow_up: bool = False
    memory_window: conint(ge=1) = 3
    llm_style: str = "Normal"  # Options: [Normal, ELI5]
    llm_loader: str = (
        "gpt-4o-mini"  # Options: [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
    )
    openai_params: Optional[OpenAIParams] = None
    local_llm_params: Optional[LocalLLMParams] = None
    stream: bool = False
    pdf_reader: str = "gpt"  # Options: [llama, pymupdf, gpt]


class ChatLoggingConfig(BaseModel):
    log_chat: bool = True
    platform: str = "literalai"
    callbacks: bool = True


class SplitterOptions(BaseModel):
    # Document chunking settings used when ingesting files.
    use_splitter: bool = True
    split_by_token: bool = True
    remove_leftover_delimiters: bool = True
    remove_chunks: bool = False
    chunking_mode: str = "semantic"  # Options: [fixed, semantic]
    chunk_size: conint(gt=0) = 300
    chunk_overlap: conint(ge=0) = 30
    chunk_separators: List[str] = ["\n\n", "\n", " ", ""]
    front_chunks_to_remove: Optional[conint(ge=0)] = None
    last_chunks_to_remove: Optional[conint(ge=0)] = None
    # NOTE(review): duplicate " " entry — possibly a two-space delimiter
    # collapsed during rendering; confirm against the original file.
    delimiters_to_remove: List[str] = ["\t", "\n", " ", " "]


class RetrieverConfig(BaseModel):
    # Map of db_option -> HuggingFace repo holding a prebuilt index.
    retriever_hf_paths: dict[str, str] = {"RAGatouille": "XThomasBU/Colbert_Index"}


class MetadataConfig(BaseModel):
    # Course pages scraped for lecture/schedule metadata.
    metadata_links: List[HttpUrl] = [
        "https://dl4ds.github.io/sp2024/lectures/",
        "https://dl4ds.github.io/sp2024/schedule/",
    ]
    slide_base_link: HttpUrl = "https://dl4ds.github.io"


class APIConfig(BaseModel):
    timeout: conint(gt=0) = 60  # seconds


class Config(BaseModel):
    """Top-level application configuration, merged from config.yml and
    project_config.yml. Pydantic validates all fields on construction."""

    log_dir: str = "storage/logs"
    log_chunk_dir: str = "storage/logs/chunks"
    device: str = "cpu"  # Options: ['cuda', 'cpu']

    vectorstore: VectorStoreConfig
    llm_params: LLMParams
    chat_logging: ChatLoggingConfig
    splitter_options: SplitterOptions
    retriever: RetrieverConfig
    metadata: MetadataConfig
    api_config: APIConfig


class ConfigWrapper:
    """Read-only facade over a validated ``Config`` supporting both attribute
    access (``config.llm_params``) and item access (``config["llm_params"]``).

    Defined before ``ConfigManager`` so its name can be used as the return
    annotation of ``get_config``.
    """

    def __init__(self, config: Config):
        self._config = config

    def __getitem__(self, key):
        return getattr(self._config, key)

    def __getattr__(self, name):
        # Guard against infinite recursion when `_config` itself is missing
        # (e.g. attribute lookup during unpickling, before __init__ has run).
        if name == "_config":
            raise AttributeError(name)
        return getattr(self._config, name)

    def dict(self):
        # Delegate serialization to the wrapped pydantic model.
        return self._config.dict()


class ConfigManager:
    """Loads, merges, and validates the two YAML configuration files."""

    def __init__(self, config_path: str, project_config_path: str):
        self.config_path = config_path
        self.project_config_path = project_config_path
        self.config = self.load_config()
        self.validate_config()

    def load_config(self) -> Config:
        """Read both YAML files and merge them (project config wins on
        top-level key collisions), then validate via the Config model."""
        with open(self.config_path, "r") as f:
            config_data = yaml.safe_load(f)

        with open(self.project_config_path, "r") as f:
            project_config_data = yaml.safe_load(f)

        # Shallow merge: top-level keys in project_config override config.
        merged_config = {**config_data, **project_config_data}

        return Config(**merged_config)

    def get_config(self) -> ConfigWrapper:
        # Fix: the annotation previously claimed ``Config`` while a
        # ``ConfigWrapper`` was actually returned.
        return ConfigWrapper(self.config)

    def validate_config(self):
        # Intentionally a no-op: pydantic already enforces required fields
        # and field types when Config is constructed in load_config().
        pass


# Usage
# NOTE: instantiating at import time reads both YAML files as a side effect;
# importing this module therefore requires the config files to exist.
config_manager = ConfigManager(
    config_path="config/config.yml", project_config_path="config/project_config.yml"
)
# config = config_manager.get_config().dict()
apps/chainlit_base/config/project_config.yml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ retriever:
2
+ retriever_hf_paths:
3
+ RAGatouille: "XThomasBU/Colbert_Index"
4
+
5
+ metadata:
6
+ metadata_links: ["https://dl4ds.github.io/sp2024/lectures/", "https://dl4ds.github.io/sp2024/schedule/"]
7
+ slide_base_link: "https://dl4ds.github.io"
8
+
9
+ api_config:
10
+ timeout: 60
apps/chainlit_base/config/prompts.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Prompt templates, keyed first by model family ("openai", "tiny_llama") and
# then by usage ("rephrase_prompt", "prompt_with_history", "prompt_no_history").
# "prompt_with_history" for openai is further keyed by tutoring style
# ("normal", "eli5", "socratic"). Placeholders filled at runtime by the chain:
# {chat_history}, {context}, {input}.
prompts = {
    "openai": {
        # Rewrites a student's follow-up question into a standalone question
        # using chat history, without changing its meaning or point of view.
        "rephrase_prompt": (
            "You are someone that rephrases statements. Rephrase the student's question to add context from their chat history if relevant, ensuring it remains from the student's point of view. "
            "Incorporate relevant details from the chat history to make the question clearer and more specific. "
            "Do not change the meaning of the original statement, and maintain the student's tone and perspective. "
            "If the question is conversational and doesn't require context, do not rephrase it. "
            "Example: If the student previously asked about backpropagation in the context of deep learning and now asks 'what is it', rephrase to 'What is backpropagation.'. "
            "Example: Do not rephrase if the user is asking something specific like 'cool, suggest a project with transformers to use as my final project' "
            "Chat history: \n{chat_history}\n"
            "Rephrase the following question only if necessary: '{input}'"
            "Rephrased Question:'"
        ),
        "prompt_with_history": {
            # Default tutoring voice: friendly, concise, context-grounded.
            "normal": (
                "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
                "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
                "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
                "Render math equations in LaTeX format between $ or $$ signs, stick to the parameter and variable icons found in your context. Be sure to explain the parameters and variables in the equations."
                "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
                "Do not get influenced by the style of conversation in the chat history. Follow the instructions given here."
                "Chat History:\n{chat_history}\n\n"
                "Context:\n{context}\n\n"
                "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
                "Student: {input}\n"
                "AI Tutor:"
            ),
            # "Explain Like I'm 5" voice: maximally simple language and examples.
            "eli5": (
                "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Your job is to explain things in the simplest and most engaging way possible, just like the 'Explain Like I'm 5' (ELI5) concept."
                "If you don't know the answer, do your best without making things up. Keep your explanations straightforward and very easy to understand."
                "Use the chat history and context to help you, but avoid repeating past responses. Provide links from the source_file metadata when they're helpful."
                "Use very simple language and examples to explain any math equations, and put the equations in LaTeX format between $ or $$ signs."
                "Be friendly and engaging, like you're chatting with a young child who's curious and eager to learn. Avoid complex terms and jargon."
                "Include simple and clear examples wherever you can to make things easier to understand."
                "Do not get influenced by the style of conversation in the chat history. Follow the instructions given here."
                "Chat History:\n{chat_history}\n\n"
                "Context:\n{context}\n\n"
                "Answer the student's question below in a friendly, simple, and engaging way, just like the ELI5 concept. Use the context and history only if they're relevant, otherwise, just have a natural conversation."
                "Give a clear and detailed explanation with simple examples to make it easier to understand. Remember, your goal is to break down complex topics into very simple terms, just like ELI5."
                "Student: {input}\n"
                "AI Tutor:"
            ),
            # Socratic voice: leads the student with guiding questions.
            "socratic": (
                "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Engage the student in a Socratic dialogue to help them discover answers on their own. Use the provided context to guide your questioning."
                "If you don't know the answer, do your best without making things up. Keep the conversation engaging and inquisitive."
                "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata when relevant. Use the source context that is most relevant."
                "Speak in a friendly and engaging manner, encouraging critical thinking and self-discovery."
                "Use questions to lead the student to explore the topic and uncover answers."
                "Chat History:\n{chat_history}\n\n"
                "Context:\n{context}\n\n"
                "Answer the student's question below by guiding them through a series of questions and insights that lead to deeper understanding. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation."
                "Foster an inquisitive mindset and help the student discover answers through dialogue."
                "Student: {input}\n"
                "AI Tutor:"
            ),
        },
        # Variant used when chat history is disabled (llm_params.use_history=False).
        "prompt_no_history": (
            "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
            "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
            "Provide links from the source_file metadata. Use the source context that is most relevant. "
            "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
            "Context:\n{context}\n\n"
            "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
            "Student: {input}\n"
            "AI Tutor:"
        ),
    },
    # NOTE(review): the bare "system"/"user"/"assistant" role markers below look
    # like they may have lost special-token delimiters (e.g. <|system|>) when
    # this file was rendered to text — confirm against TinyLlama's chat template.
    "tiny_llama": {
        "prompt_no_history": (
            "system\n"
            "Assistant is an intelligent chatbot designed to help students with questions regarding the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance.\n"
            "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally.\n"
            "Provide links from the source_file metadata. Use the source context that is most relevant.\n"
            "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
            "\n\n"
            "user\n"
            "Context:\n{context}\n\n"
            "Question: {input}\n"
            "\n\n"
            "assistant"
        ),
        "prompt_with_history": (
            "system\n"
            "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
            "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
            "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
            "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
            "\n\n"
            "user\n"
            "Chat History:\n{chat_history}\n\n"
            "Context:\n{context}\n\n"
            "Question: {input}\n"
            "\n\n"
            "assistant"
        ),
    },
}
apps/chainlit_base/public/assets/images/avatars/ai-tutor.png ADDED
apps/chainlit_base/public/assets/images/avatars/ai_tutor.png ADDED
apps/chainlit_base/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg ADDED