JulsdL committed
Commit 0f64bae · 1 parent: 7b47aa3

Enhance chat functionality and improve flashcards handling in chainlit_frontend


- Switched to GPT-4o model for improved chat interactions.
- Added shutil import for directory management.
- Implemented user feedback upon successful notebook upload.
- Transitioned from print statements to logger for consistent logging.
- Increased recursion limit for message processing to 10.
- Streamlined flashcards creation process, including automatic file discovery and cleanup.
- Added a chat-end handler that cleans up the flashcards directory, ensuring a fresh start for each session (a condensed sketch of this flow follows below).
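A minimal sketch of the discovery-and-cleanup flow summarized above, condensed from the chainlit_frontend.py diff further down. The helper name find_latest_flashcards_csv is illustrative only (the commit inlines this loop in the message handler); the flashcards directory name, the flashcards_*.csv prefix, and the @cl.on_chat_end hook are the ones used in the commit.

import os
import shutil

import chainlit as cl

FLASHCARD_DIR = "flashcards"

def find_latest_flashcards_csv(directory: str = FLASHCARD_DIR):
    # Illustrative helper (not part of the commit): return the most recently
    # modified flashcards_*.csv under `directory`, or None if none exists.
    latest_path, latest_mtime = None, 0.0
    for root, _dirs, files in os.walk(directory):
        for name in files:
            if name.startswith("flashcards_") and name.endswith(".csv"):
                path = os.path.join(root, name)
                mtime = os.path.getmtime(path)
                if mtime > latest_mtime:
                    latest_path, latest_mtime = path, mtime
    return latest_path

@cl.on_chat_end
async def end_chat():
    # Wipe and recreate the flashcards directory so each session starts clean,
    # mirroring the new chat-end handler added in this commit.
    if os.path.exists(FLASHCARD_DIR):
        shutil.rmtree(FLASHCARD_DIR)
    os.makedirs(FLASHCARD_DIR)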

.chainlit/config.toml DELETED
@@ -1,109 +0,0 @@
- [project]
- # Whether to enable telemetry (default: true). No personal data is collected.
- enable_telemetry = true
-
-
- # List of environment variables to be provided by each user to use the app.
- user_env = []
-
- # Duration (in seconds) during which the session is saved when the connection is lost
- session_timeout = 3600
-
- # Enable third parties caching (e.g LangChain cache)
- cache = false
-
- # Authorized origins
- allow_origins = ["*"]
-
- # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
- # follow_symlink = false
-
- [features]
- # Show the prompt playground
- prompt_playground = true
-
- # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
- unsafe_allow_html = false
-
- # Process and display mathematical expressions. This can clash with "$" characters in messages.
- latex = false
-
- # Automatically tag threads with the current chat profile (if a chat profile is used)
- auto_tag_thread = true
-
- # Authorize users to upload files with messages
- [features.multi_modal]
- enabled = true
- accept = ["*/*"]
- max_files = 20
- max_size_mb = 500
-
- # Allows user to use speech to text
- [features.speech_to_text]
- enabled = false
- # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
- # language = "en-US"
-
- [UI]
- # Name of the app and chatbot.
- name = "Chatbot"
-
- # Show the readme while the thread is empty.
- show_readme_as_default = true
-
- # Description of the app and chatbot. This is used for HTML tags.
- # description = ""
-
- # Large size content are by default collapsed for a cleaner ui
- default_collapse_content = true
-
- # The default value for the expand messages settings.
- default_expand_messages = false
-
- # Hide the chain of thought details from the user in the UI.
- hide_cot = false
-
- # Link to your github repo. This will add a github button in the UI's header.
- # github = ""
-
- # Specify a CSS file that can be used to customize the user interface.
- # The CSS file can be served from the public directory or via an external link.
- # custom_css = "/public/test.css"
-
- # Specify a Javascript file that can be used to customize the user interface.
- # The Javascript file can be served from the public directory.
- # custom_js = "/public/test.js"
-
- # Specify a custom font url.
- # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
-
- # Specify a custom build directory for the frontend.
- # This can be used to customize the frontend code.
- # Be careful: If this is a relative path, it should not start with a slash.
- # custom_build = "./public/build"
-
- # Override default MUI light theme. (Check theme.ts)
- [UI.theme]
- #font_family = "Inter, sans-serif"
- [UI.theme.light]
- #background = "#FAFAFA"
- #paper = "#FFFFFF"
-
- [UI.theme.light.primary]
- #main = "#F80061"
- #dark = "#980039"
- #light = "#FFE7EB"
-
- # Override default MUI dark theme. (Check theme.ts)
- [UI.theme.dark]
- #background = "#FAFAFA"
- #paper = "#FFFFFF"
-
- [UI.theme.dark.primary]
- #main = "#F80061"
- #dark = "#980039"
- #light = "#FFE7EB"
-
-
- [meta]
- generated_by = "1.0.506"
.chainlit/translations/en-US.json DELETED
@@ -1,231 +0,0 @@
- {
- "components": {
- "atoms": {
- "buttons": {
- "userButton": {
- "menu": {
- "settings": "Settings",
- "settingsKey": "S",
- "APIKeys": "API Keys",
- "logout": "Logout"
- }
- }
- }
- },
- "molecules": {
- "newChatButton": {
- "newChat": "New Chat"
- },
- "tasklist": {
- "TaskList": {
- "title": "\ud83d\uddd2\ufe0f Task List",
- "loading": "Loading...",
- "error": "An error occured"
- }
- },
- "attachments": {
- "cancelUpload": "Cancel upload",
- "removeAttachment": "Remove attachment"
- },
- "newChatDialog": {
- "createNewChat": "Create new chat?",
- "clearChat": "This will clear the current messages and start a new chat.",
- "cancel": "Cancel",
- "confirm": "Confirm"
- },
- "settingsModal": {
- "settings": "Settings",
- "expandMessages": "Expand Messages",
- "hideChainOfThought": "Hide Chain of Thought",
- "darkMode": "Dark Mode"
- },
- "detailsButton": {
- "using": "Using",
- "running": "Running",
- "took_one": "Took {{count}} step",
- "took_other": "Took {{count}} steps"
- },
- "auth": {
- "authLogin": {
- "title": "Login to access the app.",
- "form": {
- "email": "Email address",
- "password": "Password",
- "noAccount": "Don't have an account?",
- "alreadyHaveAccount": "Already have an account?",
- "signup": "Sign Up",
- "signin": "Sign In",
- "or": "OR",
- "continue": "Continue",
- "forgotPassword": "Forgot password?",
- "passwordMustContain": "Your password must contain:",
- "emailRequired": "email is a required field",
- "passwordRequired": "password is a required field"
- },
- "error": {
- "default": "Unable to sign in.",
- "signin": "Try signing in with a different account.",
- "oauthsignin": "Try signing in with a different account.",
- "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
- "oauthcallbackerror": "Try signing in with a different account.",
- "oauthcreateaccount": "Try signing in with a different account.",
- "emailcreateaccount": "Try signing in with a different account.",
- "callback": "Try signing in with a different account.",
- "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
- "emailsignin": "The e-mail could not be sent.",
- "emailverify": "Please verify your email, a new email has been sent.",
- "credentialssignin": "Sign in failed. Check the details you provided are correct.",
- "sessionrequired": "Please sign in to access this page."
- }
- },
- "authVerifyEmail": {
- "almostThere": "You're almost there! We've sent an email to ",
- "verifyEmailLink": "Please click on the link in that email to complete your signup.",
- "didNotReceive": "Can't find the email?",
- "resendEmail": "Resend email",
- "goBack": "Go Back",
- "emailSent": "Email sent successfully.",
- "verifyEmail": "Verify your email address"
- },
- "providerButton": {
- "continue": "Continue with {{provider}}",
- "signup": "Sign up with {{provider}}"
- },
- "authResetPassword": {
- "newPasswordRequired": "New password is a required field",
- "passwordsMustMatch": "Passwords must match",
- "confirmPasswordRequired": "Confirm password is a required field",
- "newPassword": "New password",
- "confirmPassword": "Confirm password",
- "resetPassword": "Reset Password"
- },
- "authForgotPassword": {
- "email": "Email address",
- "emailRequired": "email is a required field",
- "emailSent": "Please check the email address {{email}} for instructions to reset your password.",
- "enterEmail": "Enter your email address and we will send you instructions to reset your password.",
- "resendEmail": "Resend email",
- "continue": "Continue",
- "goBack": "Go Back"
- }
- }
- },
- "organisms": {
- "chat": {
- "history": {
- "index": {
- "showHistory": "Show history",
- "lastInputs": "Last Inputs",
- "noInputs": "Such empty...",
- "loading": "Loading..."
- }
- },
- "inputBox": {
- "input": {
- "placeholder": "Type your message here..."
- },
- "speechButton": {
- "start": "Start recording",
- "stop": "Stop recording"
- },
- "SubmitButton": {
- "sendMessage": "Send message",
- "stopTask": "Stop Task"
- },
- "UploadButton": {
- "attachFiles": "Attach files"
- },
- "waterMark": {
- "text": "Built with"
- }
- },
- "Messages": {
- "index": {
- "running": "Running",
- "executedSuccessfully": "executed successfully",
- "failed": "failed",
- "feedbackUpdated": "Feedback updated",
- "updating": "Updating"
- }
- },
- "dropScreen": {
- "dropYourFilesHere": "Drop your files here"
- },
- "index": {
- "failedToUpload": "Failed to upload",
- "cancelledUploadOf": "Cancelled upload of",
- "couldNotReachServer": "Could not reach the server",
- "continuingChat": "Continuing previous chat"
- },
- "settings": {
- "settingsPanel": "Settings panel",
- "reset": "Reset",
- "cancel": "Cancel",
- "confirm": "Confirm"
- }
- },
- "threadHistory": {
- "sidebar": {
- "filters": {
- "FeedbackSelect": {
- "feedbackAll": "Feedback: All",
- "feedbackPositive": "Feedback: Positive",
- "feedbackNegative": "Feedback: Negative"
- },
- "SearchBar": {
- "search": "Search"
- }
- },
- "DeleteThreadButton": {
- "confirmMessage": "This will delete the thread as well as it's messages and elements.",
- "cancel": "Cancel",
- "confirm": "Confirm",
- "deletingChat": "Deleting chat",
- "chatDeleted": "Chat deleted"
- },
- "index": {
- "pastChats": "Past Chats"
- },
- "ThreadList": {
- "empty": "Empty...",
- "today": "Today",
- "yesterday": "Yesterday",
- "previous7days": "Previous 7 days",
- "previous30days": "Previous 30 days"
- },
- "TriggerButton": {
- "closeSidebar": "Close sidebar",
- "openSidebar": "Open sidebar"
- }
- },
- "Thread": {
- "backToChat": "Go back to chat",
- "chatCreatedOn": "This chat was created on"
- }
- },
- "header": {
- "chat": "Chat",
- "readme": "Readme"
- }
- }
- },
- "hooks": {
- "useLLMProviders": {
- "failedToFetchProviders": "Failed to fetch providers:"
- }
- },
- "pages": {
- "Design": {},
- "Env": {
- "savedSuccessfully": "Saved successfully",
- "requiredApiKeys": "Required API Keys",
- "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
- },
- "Page": {
- "notPartOfProject": "You are not part of this project."
- },
- "ResumeButton": {
- "resumeChat": "Resume Chat"
- }
- }
- }
.gitignore CHANGED
@@ -82,10 +82,16 @@ target/
profile_default/
ipython_config.py

+ # FLashcard directory
+ flashcards/
+
+ # .chainlit directory
+ .chainlit/
+
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
- # .python-version
+ .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
flashcards_cca7854c-91c2-47d5-872f-46132739ace0.csv ADDED
@@ -0,0 +1,11 @@
+ Front,Back
+ What command is used to clone a GitHub repository in a notebook?,!git clone https://github.com/arcee-ai/DALM
+ How do you install or upgrade a Python package in a notebook?,!pip install --upgrade -q -e .
+ Which command installs the 'langchain' and 'langchain-community' libraries?,!pip install -qU langchain langchain-core langchain-community sentence_transformers
+ What is the command to install 'pymupdf' and 'faiss-cpu'?,!pip install -qU pymupdf faiss-cpu
+ How do you import the Pandas library in Python?,import pandas as pd
+ Which library provides the 'HuggingFaceEmbeddings' class?,from langchain_community.embeddings import HuggingFaceEmbeddings
+ How do you import the 'FAISS' vector store from the 'langchain_community' library?,from langchain_community.vectorstores import FAISS
+ What is the import statement for reading directories using the 'Llama Index' library?,from llama_index.core import SimpleDirectoryReader
+ Which import statement is used for parsing nodes in the 'Llama Index' library?,from llama_index.core.node_parser import SimpleNodeParser
+ How do you import the 'MetadataMode' schema from the 'Llama Index' library?,from llama_index.core.schema import MetadataMode
notebook_tutor/agents.py ADDED
@@ -0,0 +1,100 @@
+ from typing import Annotated
+ from langchain_core.tools import tool
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+ from langchain_core.messages import AIMessage
+ from langchain.agents import AgentExecutor, create_openai_functions_agent
+ from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
+ from langchain_openai import ChatOpenAI
+ from tools import create_flashcards_tool, RetrievalChainWrapper
+
+
+ # Instantiate the language model
+ llm = ChatOpenAI(model="gpt-4o")
+
+ # Function to create an instance of the retrieval tool wrapper
+ def get_retrieve_information_tool(retrieval_chain):
+     wrapper_instance = RetrievalChainWrapper(retrieval_chain)
+     return tool(wrapper_instance.retrieve_information)
+
+ # Instantiate the flashcard tool
+ flashcard_tool = create_flashcards_tool
+
+ # Function to create agents
+ def create_agent(
+     llm: ChatOpenAI,
+     tools: list,
+     system_prompt: str,
+ ) -> AgentExecutor:
+     """Create a function-calling agent and add it to the graph."""
+     system_prompt += "\nWork autonomously according to your specialty, using the tools available to you."
+     " Do not ask for clarification."
+     " Your other team members (and other teams) will collaborate with you with their own specialties."
+     " You are chosen for a reason! You are one of the following team members: {team_members}."
+     prompt = ChatPromptTemplate.from_messages(
+         [
+             (
+                 "system",
+                 system_prompt,
+             ),
+             MessagesPlaceholder(variable_name="messages"),
+             MessagesPlaceholder(variable_name="agent_scratchpad"),
+         ]
+     )
+     agent = create_openai_functions_agent(llm, tools, prompt)
+     executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True)
+     return executor
+
+ # Function to create agent nodes
+ def agent_node(state, agent, name):
+     result = agent.invoke(state)
+     if 'messages' not in result:
+         raise ValueError(f"No messages found in agent state: {result}")
+     new_state = {"messages": state["messages"] + [AIMessage(content=result["output"], name=name)]}
+
+     # Set the appropriate flags and next state
+     if name == "QuizAgent":
+         new_state["quiz_created"] = True
+     elif name == "QAAgent":
+         new_state["question_answered"] = True
+     elif name == "FlashcardsAgent":
+         new_state["flashcards_created"] = True
+
+     return new_state
+
+ # Function to create the supervisor
+ def create_team_supervisor(llm: ChatOpenAI, system_prompt, members) -> AgentExecutor:
+     """An LLM-based router."""
+     options = ["WAIT", "FINISH"] + members
+     function_def = {
+         "name": "route",
+         "description": "Select the next role.",
+         "parameters": {
+             "title": "routeSchema",
+             "type": "object",
+             "properties": {
+                 "next": {
+                     "title": "Next",
+                     "anyOf": [
+                         {"enum": options},
+                     ],
+                 },
+             },
+             "required": ["next"],
+         },
+     }
+     prompt = ChatPromptTemplate.from_messages(
+         [
+             ("system", system_prompt),
+             MessagesPlaceholder(variable_name="messages"),
+             (
+                 "system",
+                 "Given the conversation above, who should act next?"
+                 " Or should we WAIT for user input? Select one of: {options}",
+             ),
+         ]
+     ).partial(options=str(options), team_members=", ".join(members))
+     return (
+         prompt
+         | llm.bind_functions(functions=[function_def], function_call="route")
+         | JsonOutputFunctionsParser()
+     )
notebook_tutor/chainlit_frontend.py CHANGED
@@ -6,6 +6,7 @@ from document_processing import DocumentManager
from retrieval import RetrievalManager
from langchain_core.messages import AIMessage, HumanMessage
from graph import create_tutor_chain, TutorState
+ import shutil

# Load environment variables
load_dotenv()
@@ -18,7 +19,7 @@ logger = logging.getLogger(__name__)
@cl.on_chat_start
async def start_chat():
    settings = {
-         "model": "gpt-3.5-turbo",
+         "model": "gpt4o",
        "temperature": 0,
        "top_p": 1,
        "frequency_penalty": 0,
@@ -52,6 +53,9 @@ async def start_chat():
    tutor_chain = create_tutor_chain(retrieval_chain)
    cl.user_session.set("tutor_chain", tutor_chain)

+     ready_to_chat_message = "Notebook uploaded and processed successfully. You are now ready to chat!"
+     await cl.Message(content=ready_to_chat_message).send()
+
    logger.info("Chat started and notebook uploaded successfully.")

@cl.on_message
@@ -72,49 +76,65 @@ async def main(message: cl.Message):
        quiz_created=False,
        question_answered=False,
        flashcards_created=False,
-         flashcard_filename="",
    )

-     print("\033[93m" + f"Initial state: {state}" + "\033[0m")
+     logger.info(f"Initial state: {state}")

    # Process the message through the LangGraph chain
-     for s in tutor_chain.stream(state, {"recursion_limit": 3}):
-         print("\033[93m" + f"State after processing: {s}" + "\033[0m")
+     for s in tutor_chain.stream(state, {"recursion_limit": 10}):
+         logger.info(f"State after processing: {s}")

        agent_state = next(iter(s.values()))
-         print("\033[93m" + f"Agent state: {agent_state}" + "\033[0m")

        if "QAAgent" in s:
            if s['QAAgent']['question_answered']:
-                 print("\033[93m" + "************************Question answered**********************." + "\033[0m")
                qa_message = agent_state["messages"][-1].content
+                 logger.info(f"Sending QAAgent message: {qa_message}")
                await cl.Message(content=qa_message).send()

        if "QuizAgent" in s:
            if s['QuizAgent']['quiz_created']:
-                 print("\033[93m" + "************************Quiz created**********************." + "\033[0m")
                quiz_message = agent_state["messages"][-1].content
+                 logger.info(f"Sending QuizAgent message: {quiz_message}")
                await cl.Message(content=quiz_message).send()

        if "FlashcardsAgent" in s:
            if s['FlashcardsAgent']['flashcards_created']:
-                 print("\033[93m" + "************************Flashcards created**********************." + "\033[0m")
                flashcards_message = agent_state["messages"][-1].content
+                 logger.info(f"Sending FlashcardsAgent message: {flashcards_message}")
                await cl.Message(content=flashcards_message).send()

-                 flashcard_path = agent_state["flashcard_path"]
-                 print("\033[93m" + f"Flashcard path: {flashcard_path}" + "\033[0m")
-
-
-                 # Use the File class to send the file
-                 file_element = cl.File(name="Flashcards", path=flashcard_path)
-                 print("\033[93m" + f"Sending flashcards file: {file_element}" + "\033[0m")
-                 await cl.Message(
-                     content="Here are your flashcards:",
-                     elements=[file_element]
-                 ).send()
-
-     final_state = s # Save the final state after processing
-     print("\033[93m" + f"Final state: {final_state}" + "\033[0m")
-
-     print("\033[93m" + "Reached END state." + "\033[0m")
+                 # Search for the flashcard file in the specified directory
+                 flashcard_directory = 'flashcards'
+                 flashcard_file = None
+                 latest_time = 0
+                 for root, dirs, files in os.walk(flashcard_directory):
+                     for file in files:
+                         if file.startswith('flashcards_') and file.endswith('.csv'):
+                             file_path = os.path.join(root, file)
+                             file_time = os.path.getmtime(file_path)
+                             if file_time > latest_time:
+                                 latest_time = file_time
+                                 flashcard_file = file_path
+
+                 if flashcard_file:
+                     logger.info(f"Flashcard path: {flashcard_file}")
+                     # Use the File class to send the file
+                     file_element = cl.File(name="Flashcards", path=flashcard_file, display="inline")
+                     logger.info(f"Sending flashcards file: {file_element}")
+
+                     await cl.Message(
+                         content="Download the flashcards in .csv here:",
+                         elements=[file_element]
+                     ).send()
+
+     logger.info("Reached END state.")
+
+
+ @cl.on_chat_end
+ async def end_chat():
+     # Clean up the flashcards directory
+     flashcard_directory = 'flashcards'
+     if os.path.exists(flashcard_directory):
+         shutil.rmtree(flashcard_directory)
+     os.makedirs(flashcard_directory)
notebook_tutor/graph.py CHANGED
@@ -1,176 +1,45 @@
- from typing import Annotated
from dotenv import load_dotenv
- from langchain_core.tools import tool
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
- from langchain_core.messages import AIMessage
- from langchain.agents import AgentExecutor, create_openai_functions_agent
- from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
- from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
- from tools import create_flashcards_tool
from states import TutorState
+ from agents import create_agent, agent_node, create_team_supervisor, get_retrieve_information_tool, llm, flashcard_tool
+ from prompt_templates import PromptTemplates
import functools

# Load environment variables
load_dotenv()

- # Instantiate the language model
- llm = ChatOpenAI(model="gpt-4o")
-
- class RetrievalChainWrapper:
-     def __init__(self, retrieval_chain):
-         self.retrieval_chain = retrieval_chain
-
-     def retrieve_information(
-         self,
-         query: Annotated[str, "query to ask the RAG tool"]
-     ):
-         """Use this tool to retrieve information about the provided notebook."""
-         response = self.retrieval_chain.invoke({"question": query})
-         return response["response"].content
-
- # Create an instance of the wrapper
- def get_retrieve_information_tool(retrieval_chain):
-     wrapper_instance = RetrievalChainWrapper(retrieval_chain)
-     return tool(wrapper_instance.retrieve_information)
-
- # Instantiate the tools
- flashcard_tool = create_flashcards_tool
-
- # Function to create agents
- def create_agent(
-     llm: ChatOpenAI,
-     tools: list,
-     system_prompt: str,
- ) -> AgentExecutor:
-     """Create a function-calling agent and add it to the graph."""
-     system_prompt += "\nWork autonomously according to your specialty, using the tools available to you."
-     " Do not ask for clarification."
-     " Your other team members (and other teams) will collaborate with you with their own specialties."
-     " You are chosen for a reason! You are one of the following team members: {team_members}."
-     prompt = ChatPromptTemplate.from_messages(
-         [
-             (
-                 "system",
-                 system_prompt,
-             ),
-             MessagesPlaceholder(variable_name="messages"),
-             MessagesPlaceholder(variable_name="agent_scratchpad"),
-         ]
-     )
-     agent = create_openai_functions_agent(llm, tools, prompt)
-     executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True)
-     return executor
-
- # Function to create agent nodes
- def agent_node(state, agent, name):
-     result = agent.invoke(state)
-     if 'messages' not in result:
-         raise ValueError(f"No messages found in agent state: {result}")
-     new_state = {"messages": state["messages"] + [AIMessage(content=result["output"], name=name)]}
-
-     print("\033[93m" + f"agent_node function state {state}" + "\033[0m")
-
-     # Set the appropriate flags and next state
-     if name == "QuizAgent":
-         new_state["quiz_created"] = True
-     elif name == "QAAgent":
-         new_state["question_answered"] = True
-     elif name == "FlashcardsAgent":
-         new_state["flashcards_created"] = True
-         print("\033[93m" + f"agent_node function result_output {result}" + "\033[0m")
-         # new_state["flashcard_path"] = result["output"]
-
-     new_state["next"] = "FINISH"
-     return new_state
-
-
-
- # Function to create the supervisor
- def create_team_supervisor(llm: ChatOpenAI, system_prompt, members) -> AgentExecutor:
-     """An LLM-based router."""
-     options = ["WAIT", "FINISH"] + members
-     function_def = {
-         "name": "route",
-         "description": "Select the next role.",
-         "parameters": {
-             "title": "routeSchema",
-             "type": "object",
-             "properties": {
-                 "next": {
-                     "title": "Next",
-                     "anyOf": [
-                         {"enum": options},
-                     ],
-                 },
-             },
-             "required": ["next"],
-         },
-     }
-     prompt = ChatPromptTemplate.from_messages(
-         [
-             ("system", system_prompt),
-             MessagesPlaceholder(variable_name="messages"),
-             (
-                 "system",
-                 "Given the conversation above, who should act next?"
-                 " Or should we WAIT for user input? Select one of: {options}",
-             ),
-         ]
-     ).partial(options=str(options), team_members=", ".join(members))
-     return (
-         prompt
-         | llm.bind_functions(functions=[function_def], function_call="route")
-         | JsonOutputFunctionsParser()
-     )
-
-
# Create the LangGraph chain
def create_tutor_chain(retrieval_chain):
-
    retrieve_information_tool = get_retrieve_information_tool(retrieval_chain)

    # Create QA Agent
    qa_agent = create_agent(
        llm,
        [retrieve_information_tool],
-         "You are a QA assistant who answers questions about the provided notebook content.",
+         PromptTemplates().get_qa_agent_prompt(),
    )
-
    qa_node = functools.partial(agent_node, agent=qa_agent, name="QAAgent")

    # Create Quiz Agent
    quiz_agent = create_agent(
        llm,
        [retrieve_information_tool],
-         """You are a quiz creator that generates quizzes based on the provided notebook content.
-         First, You MUST Use the retrieval_inforation_tool to gather context from the notebook to gather relevant and accurate information.
-         Next, create a 5-question quiz based on the information you have gathered. Include the answers at the end of the quiz.
-         Present the quiz to the user in a clear and concise manner."""
+         PromptTemplates().get_quiz_agent_prompt(),
    )
-
    quiz_node = functools.partial(agent_node, agent=quiz_agent, name="QuizAgent")

    # Create Flashcards Agent
    flashcards_agent = create_agent(
        llm,
        [retrieve_information_tool, flashcard_tool],
-         """
-         You are the Flashcard creator. Your mission is to create effective and concise flashcards based on the user's query and the content of the provided notebook. Your role involves the following tasks:
-         1. Analyze User Query: Understand the user's request and determine the key concepts and information they need to learn.
-         2. Search Notebook Content: Use the notebook content to gather relevant information and generate accurate and informative flashcards.
-         3. Generate Flashcards: Create a series of flashcards content with clear questions on the front and detailed answers on the back. Ensure that the flashcards cover the essential points and concepts requested by the user.
-         4. Export Flashcards: Use the flashcard_tool to create and export the flashcards in a format that can be easily imported into a flashcard management system, such as Anki.
-
-         Remember, your goal is to help the user learn efficiently and effectively by breaking down the notebook content into manageable, repeatable flashcards."""
+         PromptTemplates().get_flashcards_agent_prompt(),
    )
-
    flashcards_node = functools.partial(agent_node, agent=flashcards_agent, name="FlashcardsAgent")

    # Create Supervisor Agent
    supervisor_agent = create_team_supervisor(
        llm,
-         "You are a supervisor tasked with managing a conversation between the following agents: QAAgent, QuizAgent, FlashcardsAgent. Given the user request, decide which agent should act next.",
+         PromptTemplates().get_supervisor_agent_prompt(),
        ["QAAgent", "QuizAgent", "FlashcardsAgent"],
    )
notebook_tutor/prompt_templates.py CHANGED
@@ -27,6 +27,38 @@ class PromptTemplates:
        Answer the query in a pretty format if the context is related to it; otherwise, answer: 'Sorry, I can't answer. Please ask another question.'
        """)

+         self.QAAgent_prompt = """"You are a QA assistant who answers questions about the provided notebook content.
+         Provide the notebook code and context to answer the user's questions accurately and informatively."""
+
+         self.QuizAgent_prompt = """You are a quiz creator that generates quizzes based on the provided notebook content.
+         First, You MUST Use the retrieval_inforation_tool to gather context from the notebook to gather relevant and accurate information.
+         Next, create a 5-question quiz based on the information you have gathered. Include the answers at the end of the quiz.
+         Present the quiz to the user in a clear and concise manner."""
+
+         self.FlashcardsAgent_prompt = """
+         You are the Flashcard creator. Your mission is to create effective and concise flashcards based on the user's query and the content of the provided notebook. Your role involves the following tasks:
+         1. Analyze User Query: Understand the user's request and determine the key concepts and information they need to learn.
+         2. Search Notebook Content: Use the notebook content to gather relevant information and generate accurate and informative flashcards.
+         3. Generate Flashcards: Create a series of flashcards content with clear questions on the front and detailed answers on the back. Ensure that the flashcards cover the essential points and concepts requested by the user.
+         4. Export Flashcards: Use the flashcard_tool to create and export the flashcards in a format that can be easily imported into a flashcard management system, such as Anki.
+         5. DO NOT SHARE the link to the flashcard file directly with the user. Instead, provide the list of flashcards in a clear and organized manner.
+
+         Remember, your goal is to help the user learn efficiently and effectively by breaking down the notebook content into manageable, repeatable flashcards."""
+
+         self.SupervisorAgent_prompt = "You are a supervisor tasked with managing a conversation between the following agents: QAAgent, QuizAgent, FlashcardsAgent. Given the user request, decide which agent should act next."
+
    def get_rag_qa_prompt(self):
        # Returns the RAG QA prompt
        return self.rag_QA_prompt
+
+     def get_qa_agent_prompt(self):
+         return self.QAAgent_prompt
+
+     def get_quiz_agent_prompt(self):
+         return self.QuizAgent_prompt
+
+     def get_flashcards_agent_prompt(self):
+         return self.FlashcardsAgent_prompt
+
+     def get_supervisor_agent_prompt(self):
+         return self.SupervisorAgent_prompt
notebook_tutor/states.py CHANGED
@@ -9,4 +9,4 @@ class TutorState(TypedDict):
    quiz_created: bool
    question_answered: bool
    flashcards_created: bool
-     flashcard_path: str
+     # flashcard_path: str
notebook_tutor/tools.py CHANGED
@@ -1,4 +1,4 @@
- from typing import Optional, Type
+ from typing import Optional, Type, Annotated
from pydantic import BaseModel, Field
from langchain.tools import BaseTool
from langchain.callbacks.manager import (
@@ -22,10 +22,10 @@ class FlashcardTool(BaseTool):
    ) -> str:
        """Use the tool to create flashcards."""
        filename = f"flashcards_{uuid.uuid4()}.csv"
-         save_path = os.path.join('.files', filename)
-         print("\033[91m" + f"Saving flashcards to {save_path}" + "\033[0m")
+         save_path = os.path.join('flashcards', filename)
+
+         os.makedirs(os.path.dirname(save_path), exist_ok=True)

-         os.makedirs(os.path.dirname(save_path), exist_ok=True) # Create directory if it doesn't exist
        with open(save_path, 'w', newline='') as csvfile:
            fieldnames = ['Front', 'Back']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
@@ -33,7 +33,10 @@ class FlashcardTool(BaseTool):
            writer.writeheader()
            for card in flashcards:
                writer.writerow({'Front': card['question'], 'Back': card['answer']})
-         return save_path
+
+         print("\033[93m" + f"Flashcards successfully created and saved to {save_path}" + "\033[0m")
+
+         return "csv file created successfully."

    async def _arun(
        self, flashcards: list, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
@@ -43,3 +46,15 @@

# Instantiate the tool
create_flashcards_tool = FlashcardTool()
+
+ class RetrievalChainWrapper:
+     def __init__(self, retrieval_chain):
+         self.retrieval_chain = retrieval_chain
+
+     def retrieve_information(
+         self,
+         query: Annotated[str, "query to ask the RAG tool"]
+     ):
+         """Use this tool to retrieve information about the provided notebook."""
+         response = self.retrieval_chain.invoke({"question": query})
+         return response["response"].content