Merge pull request #3 from JulsdL/Flashcards_implementation
- .chainlit/config.toml +0 -109
- .chainlit/translations/en-US.json +0 -231
- .gitignore +7 -1
- README.md +3 -3
- flashcards_cca7854c-91c2-47d5-872f-46132739ace0.csv +11 -0
- main.py +0 -118
- {aims_tutor → notebook_tutor}/__init__.py +0 -0
- aims_tutor/graph.py → notebook_tutor/agents.py +16 -89
- {aims_tutor → notebook_tutor}/app.py +1 -1
- {aims_tutor → notebook_tutor}/chainlit_frontend.py +85 -33
- {aims_tutor → notebook_tutor}/document_processing.py +1 -1
- notebook_tutor/graph.py +66 -0
- {aims_tutor → notebook_tutor}/prompt_templates.py +31 -1
- {aims_tutor → notebook_tutor}/retrieval.py +0 -0
- notebook_tutor/states.py +12 -0
- notebook_tutor/tools.py +61 -0
- {aims_tutor → notebook_tutor}/utils.py +0 -0
.chainlit/config.toml
DELETED
@@ -1,109 +0,0 @@
-[project]
-# Whether to enable telemetry (default: true). No personal data is collected.
-enable_telemetry = true
-
-
-# List of environment variables to be provided by each user to use the app.
-user_env = []
-
-# Duration (in seconds) during which the session is saved when the connection is lost
-session_timeout = 3600
-
-# Enable third parties caching (e.g LangChain cache)
-cache = false
-
-# Authorized origins
-allow_origins = ["*"]
-
-# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
-# follow_symlink = false
-
-[features]
-# Show the prompt playground
-prompt_playground = true
-
-# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
-unsafe_allow_html = false
-
-# Process and display mathematical expressions. This can clash with "$" characters in messages.
-latex = false
-
-# Automatically tag threads with the current chat profile (if a chat profile is used)
-auto_tag_thread = true
-
-# Authorize users to upload files with messages
-[features.multi_modal]
-enabled = true
-accept = ["*/*"]
-max_files = 20
-max_size_mb = 500
-
-# Allows user to use speech to text
-[features.speech_to_text]
-enabled = false
-# See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
-# language = "en-US"
-
-[UI]
-# Name of the app and chatbot.
-name = "Chatbot"
-
-# Show the readme while the thread is empty.
-show_readme_as_default = true
-
-# Description of the app and chatbot. This is used for HTML tags.
-# description = ""
-
-# Large size content are by default collapsed for a cleaner ui
-default_collapse_content = true
-
-# The default value for the expand messages settings.
-default_expand_messages = false
-
-# Hide the chain of thought details from the user in the UI.
-hide_cot = false
-
-# Link to your github repo. This will add a github button in the UI's header.
-# github = ""
-
-# Specify a CSS file that can be used to customize the user interface.
-# The CSS file can be served from the public directory or via an external link.
-# custom_css = "/public/test.css"
-
-# Specify a Javascript file that can be used to customize the user interface.
-# The Javascript file can be served from the public directory.
-# custom_js = "/public/test.js"
-
-# Specify a custom font url.
-# custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
-
-# Specify a custom build directory for the frontend.
-# This can be used to customize the frontend code.
-# Be careful: If this is a relative path, it should not start with a slash.
-# custom_build = "./public/build"
-
-# Override default MUI light theme. (Check theme.ts)
-[UI.theme]
-#font_family = "Inter, sans-serif"
-[UI.theme.light]
-#background = "#FAFAFA"
-#paper = "#FFFFFF"
-
-[UI.theme.light.primary]
-#main = "#F80061"
-#dark = "#980039"
-#light = "#FFE7EB"
-
-# Override default MUI dark theme. (Check theme.ts)
-[UI.theme.dark]
-#background = "#FAFAFA"
-#paper = "#FFFFFF"
-
-[UI.theme.dark.primary]
-#main = "#F80061"
-#dark = "#980039"
-#light = "#FFE7EB"
-
-
-[meta]
-generated_by = "1.0.506"
.chainlit/translations/en-US.json
DELETED
@@ -1,231 +0,0 @@
-{
-  "components": {
-    "atoms": {
-      "buttons": {
-        "userButton": {
-          "menu": {
-            "settings": "Settings",
-            "settingsKey": "S",
-            "APIKeys": "API Keys",
-            "logout": "Logout"
-          }
-        }
-      }
-    },
-    "molecules": {
-      "newChatButton": {
-        "newChat": "New Chat"
-      },
-      "tasklist": {
-        "TaskList": {
-          "title": "\ud83d\uddd2\ufe0f Task List",
-          "loading": "Loading...",
-          "error": "An error occured"
-        }
-      },
-      "attachments": {
-        "cancelUpload": "Cancel upload",
-        "removeAttachment": "Remove attachment"
-      },
-      "newChatDialog": {
-        "createNewChat": "Create new chat?",
-        "clearChat": "This will clear the current messages and start a new chat.",
-        "cancel": "Cancel",
-        "confirm": "Confirm"
-      },
-      "settingsModal": {
-        "settings": "Settings",
-        "expandMessages": "Expand Messages",
-        "hideChainOfThought": "Hide Chain of Thought",
-        "darkMode": "Dark Mode"
-      },
-      "detailsButton": {
-        "using": "Using",
-        "running": "Running",
-        "took_one": "Took {{count}} step",
-        "took_other": "Took {{count}} steps"
-      },
-      "auth": {
-        "authLogin": {
-          "title": "Login to access the app.",
-          "form": {
-            "email": "Email address",
-            "password": "Password",
-            "noAccount": "Don't have an account?",
-            "alreadyHaveAccount": "Already have an account?",
-            "signup": "Sign Up",
-            "signin": "Sign In",
-            "or": "OR",
-            "continue": "Continue",
-            "forgotPassword": "Forgot password?",
-            "passwordMustContain": "Your password must contain:",
-            "emailRequired": "email is a required field",
-            "passwordRequired": "password is a required field"
-          },
-          "error": {
-            "default": "Unable to sign in.",
-            "signin": "Try signing in with a different account.",
-            "oauthsignin": "Try signing in with a different account.",
-            "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
-            "oauthcallbackerror": "Try signing in with a different account.",
-            "oauthcreateaccount": "Try signing in with a different account.",
-            "emailcreateaccount": "Try signing in with a different account.",
-            "callback": "Try signing in with a different account.",
-            "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
-            "emailsignin": "The e-mail could not be sent.",
-            "emailverify": "Please verify your email, a new email has been sent.",
-            "credentialssignin": "Sign in failed. Check the details you provided are correct.",
-            "sessionrequired": "Please sign in to access this page."
-          }
-        },
-        "authVerifyEmail": {
-          "almostThere": "You're almost there! We've sent an email to ",
-          "verifyEmailLink": "Please click on the link in that email to complete your signup.",
-          "didNotReceive": "Can't find the email?",
-          "resendEmail": "Resend email",
-          "goBack": "Go Back",
-          "emailSent": "Email sent successfully.",
-          "verifyEmail": "Verify your email address"
-        },
-        "providerButton": {
-          "continue": "Continue with {{provider}}",
-          "signup": "Sign up with {{provider}}"
-        },
-        "authResetPassword": {
-          "newPasswordRequired": "New password is a required field",
-          "passwordsMustMatch": "Passwords must match",
-          "confirmPasswordRequired": "Confirm password is a required field",
-          "newPassword": "New password",
-          "confirmPassword": "Confirm password",
-          "resetPassword": "Reset Password"
-        },
-        "authForgotPassword": {
-          "email": "Email address",
-          "emailRequired": "email is a required field",
-          "emailSent": "Please check the email address {{email}} for instructions to reset your password.",
-          "enterEmail": "Enter your email address and we will send you instructions to reset your password.",
-          "resendEmail": "Resend email",
-          "continue": "Continue",
-          "goBack": "Go Back"
-        }
-      }
-    },
-    "organisms": {
-      "chat": {
-        "history": {
-          "index": {
-            "showHistory": "Show history",
-            "lastInputs": "Last Inputs",
-            "noInputs": "Such empty...",
-            "loading": "Loading..."
-          }
-        },
-        "inputBox": {
-          "input": {
-            "placeholder": "Type your message here..."
-          },
-          "speechButton": {
-            "start": "Start recording",
-            "stop": "Stop recording"
-          },
-          "SubmitButton": {
-            "sendMessage": "Send message",
-            "stopTask": "Stop Task"
-          },
-          "UploadButton": {
-            "attachFiles": "Attach files"
-          },
-          "waterMark": {
-            "text": "Built with"
-          }
-        },
-        "Messages": {
-          "index": {
-            "running": "Running",
-            "executedSuccessfully": "executed successfully",
-            "failed": "failed",
-            "feedbackUpdated": "Feedback updated",
-            "updating": "Updating"
-          }
-        },
-        "dropScreen": {
-          "dropYourFilesHere": "Drop your files here"
-        },
-        "index": {
-          "failedToUpload": "Failed to upload",
-          "cancelledUploadOf": "Cancelled upload of",
-          "couldNotReachServer": "Could not reach the server",
-          "continuingChat": "Continuing previous chat"
-        },
-        "settings": {
-          "settingsPanel": "Settings panel",
-          "reset": "Reset",
-          "cancel": "Cancel",
-          "confirm": "Confirm"
-        }
-      },
-      "threadHistory": {
-        "sidebar": {
-          "filters": {
-            "FeedbackSelect": {
-              "feedbackAll": "Feedback: All",
-              "feedbackPositive": "Feedback: Positive",
-              "feedbackNegative": "Feedback: Negative"
-            },
-            "SearchBar": {
-              "search": "Search"
-            }
-          },
-          "DeleteThreadButton": {
-            "confirmMessage": "This will delete the thread as well as it's messages and elements.",
-            "cancel": "Cancel",
-            "confirm": "Confirm",
-            "deletingChat": "Deleting chat",
-            "chatDeleted": "Chat deleted"
-          },
-          "index": {
-            "pastChats": "Past Chats"
-          },
-          "ThreadList": {
-            "empty": "Empty...",
-            "today": "Today",
-            "yesterday": "Yesterday",
-            "previous7days": "Previous 7 days",
-            "previous30days": "Previous 30 days"
-          },
-          "TriggerButton": {
-            "closeSidebar": "Close sidebar",
-            "openSidebar": "Open sidebar"
-          }
-        },
-        "Thread": {
-          "backToChat": "Go back to chat",
-          "chatCreatedOn": "This chat was created on"
-        }
-      },
-      "header": {
-        "chat": "Chat",
-        "readme": "Readme"
-      }
-    }
-  },
-  "hooks": {
-    "useLLMProviders": {
-      "failedToFetchProviders": "Failed to fetch providers:"
-    }
-  },
-  "pages": {
-    "Design": {},
-    "Env": {
-      "savedSuccessfully": "Saved successfully",
-      "requiredApiKeys": "Required API Keys",
-      "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
-    },
-    "Page": {
-      "notPartOfProject": "You are not part of this project."
-    },
-    "ResumeButton": {
-      "resumeChat": "Resume Chat"
-    }
-  }
-}
.gitignore
CHANGED
@@ -82,10 +82,16 @@ target/
 profile_default/
 ipython_config.py
 
+# Flashcard directory
+flashcards/
+
+# .chainlit directory
+.chainlit/
+
 # pyenv
 # For a library or package, you might want to ignore these files since the code is
 # intended to run in multiple environments; otherwise, check them in:
-# .python-version
+.python-version
 
 # pipenv
 # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
README.md
CHANGED
@@ -1,8 +1,8 @@
-#
+# AI-Notebook-Tutor
 
 # RAG Application for QA in Jupyter Notebook
 
-
+AI-Notebook-Tutor is designed to provide question-answering capabilities in a Jupyter Notebook using the Retrieval Augmented Generation (RAG) model. It's built on top of the LangChain and Chainlit platforms, and it uses the OpenAI API for the chat model.
 
 ## Features
 
@@ -28,7 +28,7 @@ OPENAI_API_KEY=your-key-here
 4. Run the application using the following command:
 
 ```bash
-chainlit run
+chainlit run notebook_tutor/app.py
 ```
 
 ## Usage
flashcards_cca7854c-91c2-47d5-872f-46132739ace0.csv
ADDED
@@ -0,0 +1,11 @@
+Front,Back
+What command is used to clone a GitHub repository in a notebook?,!git clone https://github.com/arcee-ai/DALM
+How do you install or upgrade a Python package in a notebook?,!pip install --upgrade -q -e .
+Which command installs the 'langchain' and 'langchain-community' libraries?,!pip install -qU langchain langchain-core langchain-community sentence_transformers
+What is the command to install 'pymupdf' and 'faiss-cpu'?,!pip install -qU pymupdf faiss-cpu
+How do you import the Pandas library in Python?,import pandas as pd
+Which library provides the 'HuggingFaceEmbeddings' class?,from langchain_community.embeddings import HuggingFaceEmbeddings
+How do you import the 'FAISS' vector store from the 'langchain_community' library?,from langchain_community.vectorstores import FAISS
+What is the import statement for reading directories using the 'Llama Index' library?,from llama_index.core import SimpleDirectoryReader
+Which import statement is used for parsing nodes in the 'Llama Index' library?,from llama_index.core.node_parser import SimpleNodeParser
+How do you import the 'MetadataMode' schema from the 'Llama Index' library?,from llama_index.core.schema import MetadataMode
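These cards use the two-column Front,Back layout that the new FlashcardTool (in notebook_tutor/tools.py below) writes for import into Anki. As a quick sanity check, a file like this can be read back with the standard library (a minimal sketch, not part of the PR):

import csv

# Sketch: read the committed flashcards CSV back and print each card.
with open("flashcards_cca7854c-91c2-47d5-872f-46132739ace0.csv", newline="") as f:
    for row in csv.DictReader(f):  # the header row supplies the 'Front'/'Back' keys
        print(f"Q: {row['Front']}\nA: {row['Back']}")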
main.py
DELETED
@@ -1,118 +0,0 @@
-import os
-from operator import itemgetter
-
-import chainlit as cl
-import tiktoken
-from dotenv import load_dotenv
-
-
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.retrievers import MultiQueryRetriever
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.runnables import RunnablePassthrough
-from langchain_community.document_loaders import PyMuPDFLoader, PythonLoader, NotebookLoader
-from langchain_community.vectorstores import Qdrant
-from langchain_openai import ChatOpenAI
-from langchain_openai.embeddings import OpenAIEmbeddings
-
-# Load environment variables
-load_dotenv()
-
-# Configuration for OpenAI
-OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
-openai_chat_model = ChatOpenAI(model="gpt-4-turbo", temperature=0.1)
-
-# Define the RAG prompt
-RAG_PROMPT = """
-CONTEXT:
-{context}
-
-QUERY:
-{question}
-
-Answer the query in a pretty format if the context is related to it; otherwise, answer: 'Sorry, I can't answer.'
-"""
-rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
-
-
-# ChainLit setup for chat interaction
-@cl.on_chat_start
-async def start_chat():
-    settings = {
-        "model": "gpt-3.5-turbo",
-        "temperature": 0,
-        "top_p": 1,
-        "frequency_penalty": 0,
-        "presence_penalty": 0,
-    }
-    cl.user_session.set("settings", settings)
-
-    # Display a welcoming message with instructions
-    welcome_message = "Welcome to the AIMS-Tutor! Please upload a Jupyter notebook (.ipynb and max. 5mb) to start."
-    await cl.Message(content=welcome_message).send()
-
-    # Wait for the user to upload a file
-    files = None
-    while files is None:
-        files = await cl.AskFileMessage(
-            content="Please upload a Jupyter notebook (.ipynb, max. 5mb):",
-            accept={"application/x-ipynb+json": [".ipynb"]},
-            max_size_mb=5
-        ).send()
-
-    file = files[0]  # Get the first file
-
-    if file:
-        # Load the Jupyter notebook
-        notebook_path = file.path  # Extract the path from the AskFileResponse object
-
-        loader = NotebookLoader(
-            notebook_path,
-            include_outputs=False,
-            max_output_length=20,
-            remove_newline=True,
-            traceback=False
-        )
-        docs = loader.load()
-        cl.user_session.set("docs", docs)  # Store the docs in the user session
-
-        # Initialize the retriever components after loading document
-        text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50, length_function=tiktoken_len)  # Initialize the text splitter
-        split_chunks = text_splitter.split_documents(docs)  # Split the documents into chunks
-        embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")  # Initialize the embedding model
-        qdrant_vectorstore = Qdrant.from_documents(split_chunks, embedding_model, location=":memory:", collection_name="Notebook")  # Create a Qdrant vector store
-        qdrant_retriever = qdrant_vectorstore.as_retriever()  # Set the Qdrant vector store as a retriever
-        multiquery_retriever = MultiQueryRetriever.from_llm(retriever=qdrant_retriever, llm=openai_chat_model, include_original=True)  # Create a multi-query retriever on top of the Qdrant retriever
-
-        # Store the multiquery_retriever in the user session
-        cl.user_session.set("multiquery_retriever", multiquery_retriever)
-
-
-@cl.on_message
-async def main(message: cl.Message):
-    # Retrieve the multi-query retriever from session
-    multiquery_retriever = cl.user_session.get("multiquery_retriever")
-    if not multiquery_retriever:
-        await cl.Message(content="No document processing setup found. Please upload a Jupyter notebook first.").send()
-        return
-
-    question = message.content
-    response = handle_query(question, multiquery_retriever)  # Process the question
-
-    msg = cl.Message(content=response)
-    await msg.send()
-
-def handle_query(question, retriever):
-    # Define the retrieval augmented query-answering chain
-    retrieval_augmented_qa_chain = (
-        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
-        | RunnablePassthrough.assign(context=itemgetter("context"))
-        | {"response": rag_prompt | openai_chat_model, "context": itemgetter("context")}
-    )
-    response = retrieval_augmented_qa_chain.invoke({"question": question})
-    return response["response"].content
-
-# Tokenization function
-def tiktoken_len(text):
-    tokens = tiktoken.encoding_for_model("gpt-3.5-turbo").encode(text)
-    return len(tokens)
{aims_tutor → notebook_tutor}/__init__.py
RENAMED
File without changes
aims_tutor/graph.py → notebook_tutor/agents.py
RENAMED
@@ -1,37 +1,24 @@
 from typing import Annotated
-from dotenv import load_dotenv
 from langchain_core.tools import tool
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.messages import AIMessage
 from langchain.agents import AgentExecutor, create_openai_functions_agent
 from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
 from langchain_openai import ChatOpenAI
-from langgraph.graph import END, StateGraph
-import functools
+from tools import create_flashcards_tool, RetrievalChainWrapper
 
-# Load environment variables
-load_dotenv()
 
 # Instantiate the language model
 llm = ChatOpenAI(model="gpt-4o")
 
-class RetrievalChainWrapper:
-    def __init__(self, retrieval_chain):
-        self.retrieval_chain = retrieval_chain
-
-    def retrieve_information(
-        self,
-        query: Annotated[str, "query to ask the RAG tool"]
-    ):
-        """Use this tool to retrieve information about the provided notebook."""
-        response = self.retrieval_chain.invoke({"question": query})
-        return response["response"].content
-
-# Create an instance of the wrapper
+# Function to create an instance of the retrieval tool wrapper
 def get_retrieve_information_tool(retrieval_chain):
     wrapper_instance = RetrievalChainWrapper(retrieval_chain)
     return tool(wrapper_instance.retrieve_information)
 
+# Instantiate the flashcard tool
+flashcard_tool = create_flashcards_tool
+
 # Function to create agents
 def create_agent(
     llm: ChatOpenAI,
@@ -60,19 +47,21 @@ def create_agent(
 # Function to create agent nodes
 def agent_node(state, agent, name):
     result = agent.invoke(state)
     if 'messages' not in result:
         raise ValueError(f"No messages found in agent state: {result}")
     new_state = {"messages": state["messages"] + [AIMessage(content=result["output"], name=name)]}
 
+    # Set the appropriate flags and next state
     if name == "QuizAgent":
         new_state["quiz_created"] = True
 
     if name == "QAAgent":
         new_state["question_answered"] = True
-        new_state["next"] = "question_answered"
-    return new_state
+
+    if name == "FlashcardsAgent":
+        new_state["flashcards_created"] = True
+
+    return new_state
 
 # Function to create the supervisor
 def create_team_supervisor(llm: ChatOpenAI, system_prompt, members) -> AgentExecutor:
@@ -111,65 +100,3 @@ def create_team_supervisor(llm: ChatOpenAI, system_prompt, members) -> AgentExec
         | llm.bind_functions(functions=[function_def], function_call="route")
         | JsonOutputFunctionsParser()
     )
-
-# Define the state for the system
-class AIMSState(TypedDict):
-    messages: List[BaseMessage]
-    next: str
-    quiz: List[dict]
-    quiz_created: bool
-    question_answered: bool
-
-
-# Create the LangGraph chain
-def create_aims_chain(retrieval_chain):
-
-    retrieve_information_tool = get_retrieve_information_tool(retrieval_chain)
-
-    # Create QA Agent
-    qa_agent = create_agent(
-        llm,
-        [retrieve_information_tool],
-        "You are a QA assistant who answers questions about the provided notebook content.",
-    )
-
-    qa_node = functools.partial(agent_node, agent=qa_agent, name="QAAgent")
-
-    # Create Quiz Agent
-    quiz_agent = create_agent(
-        llm,
-        [retrieve_information_tool],
-        "You are a quiz creator that generates quizzes based on the provided notebook content."
-        """First, you MUST use the retrieval_information_tool to gather context from the notebook to gather relevant and accurate information.
-        Next, create a 5-question quiz based on the information you have gathered. Include the answers at the end of the quiz.
-        Present the quiz to the user in a clear and concise manner."""
-    )
-
-    quiz_node = functools.partial(agent_node, agent=quiz_agent, name="QuizAgent")
-
-    # Create Supervisor Agent
-    supervisor_agent = create_team_supervisor(
-        llm,
-        "You are a supervisor tasked with managing a conversation between the following agents: QAAgent, QuizAgent. Given the user request, decide which agent should act next.",
-        ["QAAgent", "QuizAgent"],
-    )
-
-    # Build the LangGraph
-    aims_graph = StateGraph(AIMSState)
-    aims_graph.add_node("QAAgent", qa_node)
-    aims_graph.add_node("QuizAgent", quiz_node)
-    aims_graph.add_node("supervisor", supervisor_agent)
-
-    aims_graph.add_edge("QAAgent", "supervisor")
-    aims_graph.add_edge("QuizAgent", "supervisor")
-    aims_graph.add_conditional_edges(
-        "supervisor",
-        lambda x: "FINISH" if x.get("quiz_created") else ("FINISH" if x.get("question_answered") else x["next"]),
-        {"QAAgent": "QAAgent", "QuizAgent": "QuizAgent", "WAIT": END, "FINISH": END, "question_answered": END},
-    )
-
-    aims_graph.set_entry_point("supervisor")
-    return aims_graph.compile()
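Only the tail of the supervisor chain is visible in this hunk: prompt | llm.bind_functions(functions=[function_def], function_call="route") | JsonOutputFunctionsParser(). The function_def itself is built earlier in create_team_supervisor and is not shown in the diff. In the usual LangGraph supervisor pattern it is a "route" function whose schema constrains which worker acts next, roughly like this (an assumed reconstruction, not the PR's exact code):

# Assumed sketch of the routing function definition a supervisor like this binds.
# The exact schema is not visible in the diff; this is the conventional shape.
options = ["FINISH", "QAAgent", "QuizAgent", "FlashcardsAgent"]
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {"next": {"title": "Next", "anyOf": [{"enum": options}]}},
        "required": ["next"],
    },
}
# Each supervisor turn then parses to a dict such as {"next": "QuizAgent"},
# which the graph's conditional edges consume via x["next"].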
{aims_tutor → notebook_tutor}/app.py
RENAMED
@@ -1,6 +1,6 @@
 import os
 from dotenv import load_dotenv
-import aims_tutor.chainlit_frontend as cl_frontend
+import notebook_tutor.chainlit_frontend as cl_frontend
 
 # Load environment variables
 load_dotenv()
{aims_tutor → notebook_tutor}/chainlit_frontend.py
RENAMED
@@ -1,30 +1,38 @@
+import os
+import logging
 import chainlit as cl
 from dotenv import load_dotenv
 from document_processing import DocumentManager
 from retrieval import RetrievalManager
 from langchain_core.messages import AIMessage, HumanMessage
-from graph import …
+from graph import create_tutor_chain, TutorState
+import shutil
 
 # Load environment variables
 load_dotenv()
 
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+
+logger = logging.getLogger(__name__)
+
 @cl.on_chat_start
 async def start_chat():
     settings = {
-        "model": "…
+        "model": "gpt4o",
         "temperature": 0,
         "top_p": 1,
         "frequency_penalty": 0,
         "presence_penalty": 0,
     }
     cl.user_session.set("settings", settings)
-    welcome_message = "Welcome to the …
+    welcome_message = "Welcome to the Notebook-Tutor!"
     await cl.Message(content=welcome_message).send()
 
     files = None
     while files is None:
         files = await cl.AskFileMessage(
-            content="Please upload a Jupyter notebook (.ipynb, max. 5mb):",
+            content="Please upload a Jupyter notebook (.ipynb, max. 5mb) to start:",
             accept={"application/x-ipynb+json": [".ipynb"]},
             max_size_mb=5
         ).send()
@@ -42,48 +50,92 @@ async def start_chat():
     # Initialize LangGraph chain with the retrieval chain
     retrieval_chain = cl.user_session.get("retrieval_manager").get_RAG_QA_chain()
     cl.user_session.set("retrieval_chain", retrieval_chain)
-    …
-    cl.user_session.set("…
+    tutor_chain = create_tutor_chain(retrieval_chain)
+    cl.user_session.set("tutor_chain", tutor_chain)
+
+    ready_to_chat_message = "Notebook uploaded and processed successfully. You are now ready to chat!"
+    await cl.Message(content=ready_to_chat_message).send()
+
+    logger.info("Chat started and notebook uploaded successfully.")
 
 @cl.on_message
 async def main(message: cl.Message):
+
     # Retrieve the LangGraph chain from the session
-    …
+    tutor_chain = cl.user_session.get("tutor_chain")
 
-    if not …
+    if not tutor_chain:
         await cl.Message(content="No document processing setup found. Please upload a Jupyter notebook first.").send()
         return
 
     # Create the initial state with the user message
     user_message = message.content
-    state = …
+    state = TutorState(
+        messages=[HumanMessage(content=user_message)],
+        next="supervisor",
+        quiz=[],
+        quiz_created=False,
+        question_answered=False,
+        flashcards_created=False,
+    )
 
+    logger.info(f"Initial state: {state}")
 
     # Process the message through the LangGraph chain
-    for s in …
-        …
-        if …
-            …
-            await cl.Message(content=response).send()
-        else:
-            print("Error: No messages found in agent state.")
-    else:
-        # Check if the quiz was created and send it to the frontend
-        if state["quiz_created"]:
-            quiz_message = state["messages"][-1].content
-            await cl.Message(content=quiz_message).send()
-        # Check if a question was answered and send the response to the frontend
-        if state["question_answered"]:
-            qa_message = state["messages"][-1].content
+    for s in tutor_chain.stream(state, {"recursion_limit": 10}):
+        logger.info(f"State after processing: {s}")
+
+        agent_state = next(iter(s.values()))
+
+        if "QAAgent" in s:
+            if s['QAAgent']['question_answered']:
+                qa_message = agent_state["messages"][-1].content
+                logger.info(f"Sending QAAgent message: {qa_message}")
                 await cl.Message(content=qa_message).send()
 
+        if "QuizAgent" in s:
+            if s['QuizAgent']['quiz_created']:
+                quiz_message = agent_state["messages"][-1].content
+                logger.info(f"Sending QuizAgent message: {quiz_message}")
+                await cl.Message(content=quiz_message).send()
+
+        if "FlashcardsAgent" in s:
+            if s['FlashcardsAgent']['flashcards_created']:
+                flashcards_message = agent_state["messages"][-1].content
+                logger.info(f"Sending FlashcardsAgent message: {flashcards_message}")
+                await cl.Message(content=flashcards_message).send()
+
+                # Search for the flashcard file in the specified directory
+                flashcard_directory = 'flashcards'
+                flashcard_file = None
+                latest_time = 0
+                for root, dirs, files in os.walk(flashcard_directory):
+                    for file in files:
+                        if file.startswith('flashcards_') and file.endswith('.csv'):
+                            file_path = os.path.join(root, file)
+                            file_time = os.path.getmtime(file_path)
+                            if file_time > latest_time:
+                                latest_time = file_time
+                                flashcard_file = file_path
+
+                if flashcard_file:
+                    logger.info(f"Flashcard path: {flashcard_file}")
+                    # Use the File class to send the file
+                    file_element = cl.File(name="Flashcards", path=flashcard_file, display="inline")
+                    logger.info(f"Sending flashcards file: {file_element}")
+
+                    await cl.Message(
+                        content="Download the flashcards in .csv here:",
+                        elements=[file_element]
+                    ).send()
+
+    logger.info("Reached END state.")
-…
 
+@cl.on_chat_end
+async def end_chat():
+    # Clean up the flashcards directory
+    flashcard_directory = 'flashcards'
+    if os.path.exists(flashcard_directory):
+        shutil.rmtree(flashcard_directory)
+        os.makedirs(flashcard_directory)
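A design note on the os.walk loop in main above: it selects the most recently written flashcards_*.csv so the download matches the cards just generated, and on_chat_end wipes the directory so stale files cannot leak into the next session. A more compact equivalent of the newest-file scan (a sketch, not part of the PR):

import glob
import os

# Sketch: glob for generated card files and pick the newest by modification time.
candidates = glob.glob(os.path.join("flashcards", "**", "flashcards_*.csv"), recursive=True)
flashcard_file = max(candidates, key=os.path.getmtime, default=None)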
{aims_tutor → notebook_tutor}/document_processing.py
RENAMED
@@ -6,7 +6,7 @@ from langchain.retrievers import MultiQueryRetriever
 from langchain_openai.embeddings import OpenAIEmbeddings
 from langchain_openai import ChatOpenAI
 from dotenv import load_dotenv
-from aims_tutor.utils import tiktoken_len
+from notebook_tutor.utils import tiktoken_len
 
 # Load environment variables
 load_dotenv()
notebook_tutor/graph.py
ADDED
@@ -0,0 +1,66 @@
+from dotenv import load_dotenv
+from langgraph.graph import END, StateGraph
+from states import TutorState
+from agents import create_agent, agent_node, create_team_supervisor, get_retrieve_information_tool, llm, flashcard_tool
+from prompt_templates import PromptTemplates
+import functools
+
+# Load environment variables
+load_dotenv()
+
+# Create the LangGraph chain
+def create_tutor_chain(retrieval_chain):
+    retrieve_information_tool = get_retrieve_information_tool(retrieval_chain)
+
+    # Create QA Agent
+    qa_agent = create_agent(
+        llm,
+        [retrieve_information_tool],
+        PromptTemplates().get_qa_agent_prompt(),
+    )
+    qa_node = functools.partial(agent_node, agent=qa_agent, name="QAAgent")
+
+    # Create Quiz Agent
+    quiz_agent = create_agent(
+        llm,
+        [retrieve_information_tool],
+        PromptTemplates().get_quiz_agent_prompt(),
+    )
+    quiz_node = functools.partial(agent_node, agent=quiz_agent, name="QuizAgent")
+
+    # Create Flashcards Agent
+    flashcards_agent = create_agent(
+        llm,
+        [retrieve_information_tool, flashcard_tool],
+        PromptTemplates().get_flashcards_agent_prompt(),
+    )
+    flashcards_node = functools.partial(agent_node, agent=flashcards_agent, name="FlashcardsAgent")
+
+    # Create Supervisor Agent
+    supervisor_agent = create_team_supervisor(
+        llm,
+        PromptTemplates().get_supervisor_agent_prompt(),
+        ["QAAgent", "QuizAgent", "FlashcardsAgent"],
+    )
+
+    # Build the LangGraph
+    tutor_graph = StateGraph(TutorState)
+    tutor_graph.add_node("QAAgent", qa_node)
+    tutor_graph.add_node("QuizAgent", quiz_node)
+    tutor_graph.add_node("FlashcardsAgent", flashcards_node)
+    tutor_graph.add_node("supervisor", supervisor_agent)
+
+    tutor_graph.add_edge("QAAgent", "supervisor")
+    tutor_graph.add_edge("QuizAgent", "supervisor")
+    tutor_graph.add_edge("FlashcardsAgent", "supervisor")
+    tutor_graph.add_conditional_edges(
+        "supervisor",
+        lambda x: "FINISH" if x.get("quiz_created") or x.get("question_answered") or x.get("flashcards_created") else x["next"],
+        {"QAAgent": "QAAgent",
+         "QuizAgent": "QuizAgent",
+         "FlashcardsAgent": "FlashcardsAgent",
+         "FINISH": END},
+    )
+
+    tutor_graph.set_entry_point("supervisor")
+    return tutor_graph.compile()
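Every worker node routes back to the supervisor, and the conditional edge short-circuits to END as soon as any completion flag is set, so one user turn yields exactly one worker pass. Outside Chainlit the compiled graph can be driven directly; a minimal sketch (it assumes a retrieval_chain built elsewhere, e.g. by RetrievalManager.get_RAG_QA_chain()):

from langchain_core.messages import HumanMessage
from graph import create_tutor_chain
from states import TutorState

def run_once(retrieval_chain, text: str) -> None:
    """Sketch: push one request through the compiled tutor graph."""
    tutor_chain = create_tutor_chain(retrieval_chain)
    state = TutorState(
        messages=[HumanMessage(content=text)],
        next="supervisor",
        quiz=[],
        quiz_created=False,
        question_answered=False,
        flashcards_created=False,
    )
    for step in tutor_chain.stream(state, {"recursion_limit": 10}):
        print(step)  # each step maps a node name to its updated state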
{aims_tutor → notebook_tutor}/prompt_templates.py
RENAMED
@@ -27,6 +27,36 @@ class PromptTemplates:
         Answer the query in a pretty format if the context is related to it; otherwise, answer: 'Sorry, I can't answer. Please ask another question.'
         """)
 
+        self.QAAgent_prompt = """You are a QA assistant who answers questions about the provided notebook content.
+        Provide the notebook code and context to answer the user's questions accurately and informatively."""
+
+        self.QuizAgent_prompt = """You are a quiz creator that generates quizzes based on the provided notebook content.
+        First, you MUST use the retrieval_information_tool to gather context from the notebook to gather relevant and accurate information.
+        Next, create a 5-question quiz based on the information you have gathered. Include the answers at the end of the quiz.
+        Present the quiz to the user in a clear and concise manner."""
+
+        self.FlashcardsAgent_prompt = """
+        You are the Flashcard creator. Your mission is to create effective and concise flashcards based on the user's query and the content of the provided notebook. Your role involves the following tasks:
+        1. Analyze User Query: Understand the user's request and determine the key concepts and information they need to learn.
+        2. Search Notebook Content: Use the notebook content to gather relevant information and generate accurate and informative flashcards.
+        3. Generate Flashcards: Create a series of flashcards with clear questions on the front and detailed answers on the back. Ensure that the flashcards cover the essential points and concepts requested by the user.
+        4. Export Flashcards: YOU MUST USE the flashcard_tool to create and export the flashcards in a format that can be easily imported into a flashcard management system, such as Anki.
+        5. Provide the list of flashcards in a clear and organized manner.
+        Remember, your goal is to help the user learn efficiently and effectively by breaking down the notebook content into manageable, repeatable flashcards."""
+
+        self.SupervisorAgent_prompt = "You are a supervisor tasked with managing a conversation between the following agents: QAAgent, QuizAgent, FlashcardsAgent. Given the user request, decide which agent should act next."
+
     def get_rag_qa_prompt(self):
-        # Returns the RAG QA prompt
         return self.rag_QA_prompt
+
+    def get_qa_agent_prompt(self):
+        return self.QAAgent_prompt
+
+    def get_quiz_agent_prompt(self):
+        return self.QuizAgent_prompt
+
+    def get_flashcards_agent_prompt(self):
+        return self.FlashcardsAgent_prompt
+
+    def get_supervisor_agent_prompt(self):
+        return self.SupervisorAgent_prompt
{aims_tutor → notebook_tutor}/retrieval.py
RENAMED
File without changes
notebook_tutor/states.py
ADDED
@@ -0,0 +1,12 @@
+from typing import List, TypedDict
+from langchain_core.messages import BaseMessage
+
+# Define the state for the system
+class TutorState(TypedDict):
+    messages: List[BaseMessage]
+    next: str
+    quiz: List[dict]
+    quiz_created: bool
+    question_answered: bool
+    flashcards_created: bool
+    # flashcard_path: str
notebook_tutor/tools.py
ADDED
@@ -0,0 +1,61 @@
+from typing import Optional, Type, Annotated
+from pydantic import BaseModel, Field
+from langchain.tools import BaseTool
+from langchain.callbacks.manager import (
+    AsyncCallbackManagerForToolRun,
+    CallbackManagerForToolRun,
+)
+import csv
+import uuid
+import os
+
+class FlashcardInput(BaseModel):
+    flashcards: list = Field(description="A list of flashcards. Each flashcard should be a dictionary with 'question' and 'answer' keys.")
+
+class FlashcardTool(BaseTool):
+    name = "create_flashcards"
+    description = "Create flashcards in a .csv format suitable for import into Anki"
+    args_schema: Type[BaseModel] = FlashcardInput
+
+    def _run(
+        self, flashcards: list, run_manager: Optional[CallbackManagerForToolRun] = None
+    ) -> str:
+        """Use the tool to create flashcards."""
+        filename = f"flashcards_{uuid.uuid4()}.csv"
+
+        save_path = os.path.join('flashcards', filename)
+
+        os.makedirs(os.path.dirname(save_path), exist_ok=True)
+
+        with open(save_path, 'w', newline='') as csvfile:
+            fieldnames = ['Front', 'Back']
+            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+
+            writer.writeheader()
+            for card in flashcards:
+                writer.writerow({'Front': card['question'], 'Back': card['answer']})
+
+        print("\033[93m" + f"Flashcards successfully created and saved to {save_path}" + "\033[0m")
+
+        return "csv file created successfully."
+
+    async def _arun(
+        self, flashcards: list, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
+    ) -> str:
+        """Use the tool asynchronously."""
+        raise NotImplementedError("create_flashcards does not support async")
+
+# Instantiate the tool
+create_flashcards_tool = FlashcardTool()
+
+class RetrievalChainWrapper:
+    def __init__(self, retrieval_chain):
+        self.retrieval_chain = retrieval_chain
+
+    def retrieve_information(
+        self,
+        query: Annotated[str, "query to ask the RAG tool"]
+    ):
+        """Use this tool to retrieve information about the provided notebook."""
+        response = self.retrieval_chain.invoke({"question": query})
+        return response["response"].content
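Because FlashcardTool declares FlashcardInput as its args_schema, it can also be exercised directly, without going through the agent. A minimal sketch (the example cards are illustrative):

from tools import create_flashcards_tool

# Sketch: call the tool with the input schema it declares, a list of
# dicts carrying 'question' and 'answer' keys.
cards = [
    {"question": "How do you import pandas?", "answer": "import pandas as pd"},
    {"question": "Which class wraps the RAG chain?", "answer": "RetrievalChainWrapper"},
]
print(create_flashcards_tool.run({"flashcards": cards}))
# -> "csv file created successfully."; the file lands in flashcards/flashcards_<uuid>.csv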
{aims_tutor → notebook_tutor}/utils.py
RENAMED
File without changes