Nguyen Quang Truong committed
Commit 10ad6b3 · 1 Parent(s): ed2793a

[Big upload]

.chainlit/config.toml ADDED
@@ -0,0 +1,118 @@
+ [project]
+ # Whether to enable telemetry (default: true). No personal data is collected.
+ enable_telemetry = true
+
+
+ # List of environment variables to be provided by each user to use the app.
+ user_env = []
+
+ # Duration (in seconds) during which the session is saved when the connection is lost
+ session_timeout = 3600
+
+ # Enable third parties caching (e.g LangChain cache)
+ cache = false
+
+ # Authorized origins
+ allow_origins = ["*"]
+
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+ # follow_symlink = false
+
+ [features]
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+ unsafe_allow_html = false
+
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
+ latex = false
+
+ # Automatically tag threads with the current chat profile (if a chat profile is used)
+ auto_tag_thread = true
+
+ # Authorize users to spontaneously upload files with messages
+ [features.spontaneous_file_upload]
+ enabled = true
+ accept = ["*/*"]
+ max_files = 20
+ max_size_mb = 500
+
+ [features.audio]
+ # Threshold for audio recording
+ min_decibels = -45
+ # Delay for the user to start speaking in MS
+ initial_silence_timeout = 3000
+ # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
+ silence_timeout = 1500
+ # Above this duration (MS), the recording will forcefully stop.
+ max_duration = 15000
+ # Duration of the audio chunks in MS
+ chunk_duration = 1000
+ # Sample rate of the audio
+ sample_rate = 44100
+
+ [UI]
+ # Name of the assistant.
+ name = "Assistant"
+
+ # Description of the assistant. This is used for HTML tags.
+ # description = ""
+
+ # Large size content are by default collapsed for a cleaner ui
+ default_collapse_content = true
+
+ # Hide the chain of thought details from the user in the UI.
+ hide_cot = false
+
+ # Link to your github repo. This will add a github button in the UI's header.
+ # github = ""
+
+ # Specify a CSS file that can be used to customize the user interface.
+ # The CSS file can be served from the public directory or via an external link.
+ # custom_css = "/public/test.css"
+
+ # Specify a Javascript file that can be used to customize the user interface.
+ # The Javascript file can be served from the public directory.
+ # custom_js = "/public/test.js"
+
+ # Specify a custom font url.
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
+
+ # Specify a custom meta image url.
+ # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"
+
+ # Specify a custom build directory for the frontend.
+ # This can be used to customize the frontend code.
+ # Be careful: If this is a relative path, it should not start with a slash.
+ # custom_build = "./public/build"
+
+ [UI.theme]
+ default = "dark"
+ #layout = "wide"
+ #font_family = "Inter, sans-serif"
+ # Override default MUI light theme. (Check theme.ts)
+ [UI.theme.light]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.light.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+ [UI.theme.light.text]
+ #primary = "#212121"
+ #secondary = "#616161"
+
+ # Override default MUI dark theme. (Check theme.ts)
+ [UI.theme.dark]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.dark.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+ [UI.theme.dark.text]
+ #primary = "#EEEEEE"
+ #secondary = "#BDBDBD"
+
+ [meta]
+ generated_by = "1.1.304"
.chainlit/translations/en-US.json ADDED
@@ -0,0 +1,229 @@
+ {
+ "components": {
+ "atoms": {
+ "buttons": {
+ "userButton": {
+ "menu": {
+ "settings": "Settings",
+ "settingsKey": "S",
+ "APIKeys": "API Keys",
+ "logout": "Logout"
+ }
+ }
+ }
+ },
+ "molecules": {
+ "newChatButton": {
+ "newChat": "New Chat"
+ },
+ "tasklist": {
+ "TaskList": {
+ "title": "\ud83d\uddd2\ufe0f Task List",
+ "loading": "Loading...",
+ "error": "An error occurred"
+ }
+ },
+ "attachments": {
+ "cancelUpload": "Cancel upload",
+ "removeAttachment": "Remove attachment"
+ },
+ "newChatDialog": {
+ "createNewChat": "Create new chat?",
+ "clearChat": "This will clear the current messages and start a new chat.",
+ "cancel": "Cancel",
+ "confirm": "Confirm"
+ },
+ "settingsModal": {
+ "settings": "Settings",
+ "expandMessages": "Expand Messages",
+ "hideChainOfThought": "Hide Chain of Thought",
+ "darkMode": "Dark Mode"
+ },
+ "detailsButton": {
+ "using": "Using",
+ "used": "Used"
+ },
+ "auth": {
+ "authLogin": {
+ "title": "Login to access the app.",
+ "form": {
+ "email": "Email address",
+ "password": "Password",
+ "noAccount": "Don't have an account?",
+ "alreadyHaveAccount": "Already have an account?",
+ "signup": "Sign Up",
+ "signin": "Sign In",
+ "or": "OR",
+ "continue": "Continue",
+ "forgotPassword": "Forgot password?",
+ "passwordMustContain": "Your password must contain:",
+ "emailRequired": "email is a required field",
+ "passwordRequired": "password is a required field"
+ },
+ "error": {
+ "default": "Unable to sign in.",
+ "signin": "Try signing in with a different account.",
+ "oauthsignin": "Try signing in with a different account.",
+ "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
+ "oauthcallbackerror": "Try signing in with a different account.",
+ "oauthcreateaccount": "Try signing in with a different account.",
+ "emailcreateaccount": "Try signing in with a different account.",
+ "callback": "Try signing in with a different account.",
+ "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
+ "emailsignin": "The e-mail could not be sent.",
+ "emailverify": "Please verify your email, a new email has been sent.",
+ "credentialssignin": "Sign in failed. Check the details you provided are correct.",
+ "sessionrequired": "Please sign in to access this page."
+ }
+ },
+ "authVerifyEmail": {
+ "almostThere": "You're almost there! We've sent an email to ",
+ "verifyEmailLink": "Please click on the link in that email to complete your signup.",
+ "didNotReceive": "Can't find the email?",
+ "resendEmail": "Resend email",
+ "goBack": "Go Back",
+ "emailSent": "Email sent successfully.",
+ "verifyEmail": "Verify your email address"
+ },
+ "providerButton": {
+ "continue": "Continue with {{provider}}",
+ "signup": "Sign up with {{provider}}"
+ },
+ "authResetPassword": {
+ "newPasswordRequired": "New password is a required field",
+ "passwordsMustMatch": "Passwords must match",
+ "confirmPasswordRequired": "Confirm password is a required field",
+ "newPassword": "New password",
+ "confirmPassword": "Confirm password",
+ "resetPassword": "Reset Password"
+ },
+ "authForgotPassword": {
+ "email": "Email address",
+ "emailRequired": "email is a required field",
+ "emailSent": "Please check the email address {{email}} for instructions to reset your password.",
+ "enterEmail": "Enter your email address and we will send you instructions to reset your password.",
+ "resendEmail": "Resend email",
+ "continue": "Continue",
+ "goBack": "Go Back"
+ }
+ }
+ },
+ "organisms": {
+ "chat": {
+ "history": {
+ "index": {
+ "showHistory": "Show history",
+ "lastInputs": "Last Inputs",
+ "noInputs": "Such empty...",
+ "loading": "Loading..."
+ }
+ },
+ "inputBox": {
+ "input": {
+ "placeholder": "Type your message here..."
+ },
+ "speechButton": {
+ "start": "Start recording",
+ "stop": "Stop recording"
+ },
+ "SubmitButton": {
+ "sendMessage": "Send message",
+ "stopTask": "Stop Task"
+ },
+ "UploadButton": {
+ "attachFiles": "Attach files"
+ },
+ "waterMark": {
+ "text": "Built with"
+ }
+ },
+ "Messages": {
+ "index": {
+ "running": "Running",
+ "executedSuccessfully": "executed successfully",
+ "failed": "failed",
+ "feedbackUpdated": "Feedback updated",
+ "updating": "Updating"
+ }
+ },
+ "dropScreen": {
+ "dropYourFilesHere": "Drop your files here"
+ },
+ "index": {
+ "failedToUpload": "Failed to upload",
+ "cancelledUploadOf": "Cancelled upload of",
+ "couldNotReachServer": "Could not reach the server",
+ "continuingChat": "Continuing previous chat"
+ },
+ "settings": {
+ "settingsPanel": "Settings panel",
+ "reset": "Reset",
+ "cancel": "Cancel",
+ "confirm": "Confirm"
+ }
+ },
+ "threadHistory": {
+ "sidebar": {
+ "filters": {
+ "FeedbackSelect": {
+ "feedbackAll": "Feedback: All",
+ "feedbackPositive": "Feedback: Positive",
+ "feedbackNegative": "Feedback: Negative"
+ },
+ "SearchBar": {
+ "search": "Search"
+ }
+ },
+ "DeleteThreadButton": {
+ "confirmMessage": "This will delete the thread as well as it's messages and elements.",
+ "cancel": "Cancel",
+ "confirm": "Confirm",
+ "deletingChat": "Deleting chat",
+ "chatDeleted": "Chat deleted"
+ },
+ "index": {
+ "pastChats": "Past Chats"
+ },
+ "ThreadList": {
+ "empty": "Empty...",
+ "today": "Today",
+ "yesterday": "Yesterday",
+ "previous7days": "Previous 7 days",
+ "previous30days": "Previous 30 days"
+ },
+ "TriggerButton": {
+ "closeSidebar": "Close sidebar",
+ "openSidebar": "Open sidebar"
+ }
+ },
+ "Thread": {
+ "backToChat": "Go back to chat",
+ "chatCreatedOn": "This chat was created on"
+ }
+ },
+ "header": {
+ "chat": "Chat",
+ "readme": "Readme"
+ }
+ }
+ },
+ "hooks": {
+ "useLLMProviders": {
+ "failedToFetchProviders": "Failed to fetch providers:"
+ }
+ },
+ "pages": {
+ "Design": {},
+ "Env": {
+ "savedSuccessfully": "Saved successfully",
+ "requiredApiKeys": "Required API Keys",
+ "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
+ },
+ "Page": {
+ "notPartOfProject": "You are not part of this project."
+ },
+ "ResumeButton": {
+ "resumeChat": "Resume Chat"
+ }
+ }
+ }
Agent/prompts/cypher_examples.yaml ADDED
@@ -0,0 +1,36 @@
+ examples:
+   - question_example: Which companies located in 'San Francisco' are hiring for 'Data Scientist' roles with a 'Master's Degree' requirement?
+     cypher_example: |
+       MATCH (j:Job)<-[:RECRUITS]-(c:Company)-[:LOCATES_IN]->(l:Location)
+       MATCH (j)-[:REQUIRES]->(e:Education)
+       WHERE toLower(j.name) CONTAINS 'data scientist' AND toLower(l.name) CONTAINS 'san francisco' AND toLower(e.name) CONTAINS "master"
+       RETURN DISTINCT c.name AS company
+
+   - question_example: What are the most common skills required for 'Product Manager' jobs across different industries?
+     cypher_example: |
+       MATCH (j:Job)-[:REQUIRES]->(s:Skill)
+       WHERE toLower(j.name) CONTAINS "product manager"
+       RETURN s.name, count(*) AS skill_count
+       ORDER BY skill_count DESC
+       LIMIT 10
+
+   - question_example: Find all jobs that require at least 5 years of experience and a 'Bachelor's Degree' in 'Computer Science'
+     cypher_example: |
+       MATCH (we:Work_Exper)<-[:REQUIRES]-(j:Job)-[:REQUIRES]->(e:Education)
+       WHERE toLower(e.name) CONTAINS "bachelor" AND toLower(e.fields) CONTAINS "computer science" AND toLower(we.duration) CONTAINS "5 years"
+       RETURN j AS job
+
+   - question_example: Find companies recruiting "Machine Learning" jobs and their corresponding job titles.
+     cypher_example: |
+       MATCH (company: Company)-[:RECRUITS]->(job: Job)
+       WHERE job.name CONTAINS "Machine Learning"
+       RETURN company.name as company_name, job.name as job_title
+
+   - question_example: Machine Learning job requires?
+     cypher_example: |
+       MATCH (j:Job)
+       WHERE toLower(j.name) CONTAINS toLower("Machine Learning")
+       OPTIONAL MATCH (j)-[:REQUIRES]->(s:Skill)
+       OPTIONAL MATCH (j)-[:REQUIRES]->(e:Education)
+       OPTIONAL MATCH (j)-[:REQUIRES]->(we:Work_Exper)
+       RETURN s.name AS skill_requirements, e.name AS education_requirements, we.duration AS work_experience_requirements
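For reference, here is a minimal sketch (not a file in this commit; it assumes the same paths and packages used by Agent/tools/kg_search.py further down) of how these question/Cypher pairs can be loaded and matched against a new question with a semantic-similarity example selector:

    import yaml
    from langchain_community.vectorstores import FAISS
    from langchain_core.example_selectors import SemanticSimilarityExampleSelector
    from langchain_google_genai import GoogleGenerativeAIEmbeddings

    # Assumes GOOGLE_API_KEY is already set, as kg_search.py does via load_dotenv().
    with open("Agent/prompts/cypher_examples.yaml", "r") as f:
        examples = yaml.safe_load(f)["examples"]

    # Embed the pairs and retrieve the single most similar example for a new question.
    selector = SemanticSimilarityExampleSelector.from_examples(
        examples=examples,
        embeddings=GoogleGenerativeAIEmbeddings(model="models/text-embedding-004"),
        vectorstore_cls=FAISS,
        k=1,
    )
    print(selector.select_examples({"question": "Which skills do Data Scientist jobs require?"}))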
Agent/prompts/cypher_instruct.yaml ADDED
@@ -0,0 +1,24 @@
+ prefix: |
+   You are an expert Neo4j Developer translating user questions into Cypher statements.
+   The queries should focus on using the CONTAINS keyword with toLower() to search for specific text patterns within node properties.
+   Use MATCH to select the relevant nodes and relationships.
+   Use WHERE to filter data by node properties.
+   Use RETURN to return results with aliases.
+
+   Instructions:
+   Use only the provided relationship types and properties in the schema.
+   Do not use any other relationship types or properties that are not provided.
+   Only respond to questions that require you to construct a Cypher statement.
+   Do not include any explanations or apologies in your responses.
+
+   Examples:
+
+ example_template: |
+   Question: {question_example}
+   Cypher: {cypher_example}
+
+ suffix: |
+   Schema: {schema}
+
+   Question: {{question}}
+   Cypher:
Agent/prompts/cypher_prompt.yaml ADDED
@@ -0,0 +1,59 @@
+ input_variables: [schema, question]
+ output_parser: null
+ template: |
+   Task: Generate a Cypher statement to query a graph database.
+   Instructions:
+   Use only the provided relationship types and properties in the schema.
+   Do not use any other relationship types or properties that are not provided.
+   Schema:
+   {schema}
+   Note: Do not include any explanations or apologies in your responses.
+   Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
+   Do not include any text except the generated Cypher statement.
+
+   Examples:
+   Find all jobs in the 'Software Engineering' industry that offer remote work options and require 'Python' skills:
+   MATCH (j:Job)
+   WHERE j.name CONTAINS 'Software Engineer'
+   AND j.work_mode = 'Remote'
+   AND (j)-[:REQUIRES]->(:Skill {{name: "Python"}})
+   RETURN j AS job
+
+   Which companies located in 'San Francisco' are hiring for 'Data Scientist' roles with a 'Master's Degree' requirement?
+   MATCH (c:Company)-[:LOCATES_IN]->(l:Location {{name: "San Francisco"}})
+   WHERE (c)-[:RECRUITS]->(j:Job {{name: "Data Scientist"}})
+   AND (j)-[:REQUIRES]->(e:Education {{name: "Master's Degree"}})
+   RETURN DISTINCT c.name AS company
+
+   What are the most common skills required for 'Product Manager' jobs across different industries?
+   MATCH (j:Job {{name: "Product Manager"}})-[:REQUIRES]->(s:Skill)
+   RETURN s.name, count(*) AS skill_count
+   ORDER BY skill_count DESC
+   LIMIT 10
+
+   Find all jobs that require at least 5 years of experience and a 'Bachelor's Degree' in 'Computer Science':
+   MATCH (j:Job)-[:REQUIRES]->(e:Education {{name: "Bachelor's Degree", fields: "Computer Science"}})
+   WHERE (j)-[:REQUIRES]->(we:Work_Exper {{duration: "5 years"}})
+   RETURN j AS job
+
+   Identify companies that are subsidiaries of 'Google' and are recruiting for 'Software Engineer' roles with 'Senior' level.
+   MATCH (g:Company {{name: "Google"}})<-[:SUBDIARY]-(c:Company)
+   WHERE (c)-[:RECRUITS]->(j:Job {{name: "Software Engineer"}})
+   AND (j)-[:AT_LEVEL]->(wl:Work_LV {{name: "Senior"}})
+   RETURN DISTINCT c.name AS company
+
+   Find companies recruiting "Machine Learning" jobs and their corresponding job titles.
+   MATCH (company: Company)-[:RECRUITS]->(job: Job)
+   WHERE job.name CONTAINS "Machine Learning"
+   RETURN company.name as company_name, job.name as job_title
+
+   Show job description of Machine Learning job at KMS
+   MATCH (company:Company)-[:RECRUITS]->(job:Job)-[r]->(node)
+   WHERE job.name CONTAINS "Machine Learning" AND company.name CONTAINS "KMS"
+   RETURN job, node
+
+
+   The question is:
+   {question}
+
+ template_format: f-string
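As a usage note, the sketch below (hypothetical, but it mirrors how Agent/utils.py further down wires things together) shows how this template and qa_prompt.yaml are plugged into a GraphCypherQAChain:

    import yaml
    from langchain.chains import GraphCypherQAChain
    from langchain_community.graphs import Neo4jGraph
    from langchain_core.prompts.prompt import PromptTemplate
    from langchain_google_genai import ChatGoogleGenerativeAI

    # Assumes NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD and GOOGLE_API_KEY are already set.
    with open("Agent/prompts/cypher_prompt.yaml", "r") as f:
        cypher_prompt = PromptTemplate(**yaml.safe_load(f))
    with open("Agent/prompts/qa_prompt.yaml", "r") as f:
        qa_prompt = PromptTemplate(**yaml.safe_load(f))

    # Cypher generation uses this file's template; answer generation uses qa_prompt.yaml.
    chain = GraphCypherQAChain.from_llm(
        ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest"),
        graph=Neo4jGraph(),
        cypher_prompt=cypher_prompt,
        qa_prompt=qa_prompt,
        verbose=True,
    )
    print(chain.invoke({"query": "Which companies recruit Machine Learning jobs?"})["result"])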
Agent/prompts/qa_prompt.yaml ADDED
@@ -0,0 +1,20 @@
+ input_variables: [context, question]
+ output_parser: null
+ template: |
+   Task: answer the question you are given based on the context provided.
+   Instructions:
+   You are an assistant that helps to form nice and human understandable answers.
+   Use the context information provided to generate a well organized and comprehensive answer to the user's question.
+   When the provided information contains multiple elements, structure your answer as a bulleted or numbered list to enhance clarity and readability.
+   You must use the information to construct your answer.
+   The provided information is authoritative; do not doubt it or try to use your internal knowledge to correct it.
+   Make the answer sound like a response to the question without mentioning that you based the result on the given information.
+   If there is no information provided, say that the knowledge base returned empty results.
+   You should answer in Vietnamese.
+
+   Here's the information:
+   {context}
+
+   Question: {question}
+   Answer:
+ template_format: f-string
Agent/prompts/react_prompt.txt ADDED
@@ -0,0 +1,24 @@
+ Answer the following questions as best you can.
+ If the knowledge graph provides enough information, immediately answer the question.
+ You have access to the following tools:
+ {tools}
+
+ If the user does not provide enough information, use previous chat history to answer.
+ Previous chat history:
+ {chat_history}
+
+ Use the following format:
+
+ Question: the input question you must answer
+ Thought: you should always think about what to do
+ Action: the action to take, should be one of [{tool_names}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ... (this Thought/Action/Action Input/Observation can repeat N times)
+ Thought: I now know the final answer
+ Final Answer: the final answer to the original input question
+
+ Begin!
+
+ Question: {input}
+ Thought:{agent_scratchpad}
Agent/prompts/react_prompt_v2.txt ADDED
@@ -0,0 +1,48 @@
+ You are an assistant that helps to form nice and human understandable answers.
+ Use the context information provided to generate a well organized and comprehensive answer to the user's question.
+ When the provided information contains multiple elements, structure your answer as a bulleted or numbered list to enhance clarity and readability.
+ You must use the information to construct your answer.
+ The provided information is authoritative; do not doubt it or try to use your internal knowledge to correct it.
+ Make the answer sound like a response to the question without mentioning that you based the result on the given information.
+ If there is no information provided, say that the knowledge base returned empty results.
+ You are an assistant who helps users find suitable jobs by answering questions related to recruitment information from companies' job postings.
+ You MUST answer briefly but with complete information in Markdown format.
+ You MUST bold phrases related to jobs, skills, companies, etc.
+
+
+ TOOLS:
+
+ ------
+
+ Assistant has access to the following tools:
+
+ {tools}
+
+ To use a tool, please use the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{tool_names}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ You MUST prioritize searching the Knowledge Graph; only search the web if the Knowledge Graph does not have enough information.
+ You MUST NOT duplicate queries.
+ If neither tool provides enough information, you must answer "I cannot answer this question."
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ Final Answer: [your response here]
+ ```
+
+ Begin!
+
+ Previous conversation history:
+ {chat_history}
+
+ New input: {input}
+
+ {agent_scratchpad}
Agent/prompts/schema.txt ADDED
@@ -0,0 +1,20 @@
+ Node properties:
+ Job {{name: STRING, date_posted: DATE, status: STRING, responsibility: STRING, workLevel: STRING, benefitCompensation: STRING, workMode: STRING}}
+ Location {{name: STRING, locationType: STRING}}
+ Education {{name: STRING, majors: STRING, status: STRING}}
+ Skill {{name: STRING}}
+ Company {{name: STRING}}
+ Sector {{name: STRING}}
+ WorkExper {{name: STRING, yearsOfExper: STRING}}
+ Relationship properties:
+
+ The relationships:
+ (:Job)-[:REQUIRES]->(:Education)
+ (:Job)-[:REQUIRES]->(:Skill)
+ (:Job)-[:REQUIRES]->(:WorkExper)
+ (:Job)-[:FROM]->(:Company)
+ (:Job)-[:WORK_AT]->(:Location)
+ (:Skill)-[:HYPERNYM]->(:Skill)
+ (:Company)-[:OPERATED_IN]->(:Sector)
+ (:Company)-[:RECRUITS]->(:Job)
+ (:Company)-[:LOCATED_IN]->(:Location)
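To illustrate the schema above, here is a small sketch (connection details assumed to come from the same environment variables used elsewhere in this commit) that follows the (:Job)-[:REQUIRES]->(:Skill) relationship through the Neo4jGraph wrapper:

    from dotenv import load_dotenv
    from langchain_community.graphs import Neo4jGraph

    load_dotenv()  # expects NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD

    graph = Neo4jGraph()
    # List a few jobs together with the skills they require.
    rows = graph.query(
        """
        MATCH (j:Job)-[:REQUIRES]->(s:Skill)
        WHERE toLower(j.name) CONTAINS 'machine learning'
        RETURN j.name AS job, collect(s.name) AS skills
        LIMIT 5
        """
    )
    print(rows)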
Agent/prompts/temp.txt ADDED
@@ -0,0 +1,3 @@
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand in Markdown format.
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Agent/tools/kg_search.py ADDED
@@ -0,0 +1,146 @@
+ import os
+ import yaml
+ from dotenv import load_dotenv
+ from langchain_core.example_selectors import SemanticSimilarityExampleSelector
+ from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain.schema import AIMessage, HumanMessage, SystemMessage
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain.tools import BaseTool, StructuredTool, tool
+ from langchain_community.graphs import Neo4jGraph
+ # from utils import utils
+
+
+ # Question-Cypher pair examples
+ with open("Agent/prompts/cypher_examples.yaml", "r") as f:
+     example_pairs = yaml.safe_load(f)
+
+ examples = example_pairs["examples"]
+
+ # Embedding model used to choose the most similar examples
+ load_dotenv()
+ os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
+
+ embedding_model = GoogleGenerativeAIEmbeddings(
+     model="models/text-embedding-004"
+ )
+
+ example_selector = SemanticSimilarityExampleSelector.from_examples(
+     examples=examples,
+     embeddings=embedding_model,
+     vectorstore_cls=FAISS,
+     k=1
+ )
+
+ # Load schema, prefix, suffix
+ with open("Agent/prompts/schema.txt", "r") as file:
+     schema = file.read()
+
+ with open("Agent/prompts/cypher_instruct.yaml", "r") as file:
+     instruct = yaml.safe_load(file)
+
+ example_prompt = PromptTemplate(
+     input_variables=["question_example", "cypher_example"],
+     template=instruct["example_template"]
+ )
+
+ dynamic_prompt = FewShotPromptTemplate(
+     example_selector=example_selector,
+     example_prompt=example_prompt,
+     prefix=instruct["prefix"],
+     suffix=instruct["suffix"].format(schema=schema),
+     input_variables=["question"]
+ )
+
+
+ def generate_cypher(question: str) -> str:
+     """Make a Cypher query from the given question."""
+     load_dotenv()
+
+     # Set up Neo4J & Gemini API
+     os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
+     os.environ["NEO4J_USERNAME"] = os.getenv("NEO4J_USERNAME")
+     os.environ["NEO4J_PASSWORD"] = os.getenv("NEO4J_PASSWORD")
+     os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
+
+     gemini_chat = ChatGoogleGenerativeAI(
+         model="gemini-1.5-flash-latest"
+     )
+
+     chat_messages = [
+         SystemMessage(content=dynamic_prompt.format(question=question)),
+     ]
+
+     output_parser = StrOutputParser()
+     chain = dynamic_prompt | gemini_chat | output_parser
+     cypher_statement = chain.invoke({"question": question})
+     cypher_statement = cypher_statement.replace("```", "").replace("cypher", "").strip()
+
+     return cypher_statement
+
+
+ def run_cypher(question: str, cypher_statement: str) -> str:
+     """Return the result of a Cypher query from the Knowledge Graph."""
+     knowledge_graph = Neo4jGraph()
+     result = knowledge_graph.query(cypher_statement)
+     print(f"\nCypher Result:\n{result}")
+
+     gemini_chat = ChatGoogleGenerativeAI(
+         model="gemini-1.5-flash-latest"
+     )
+
+     answer_prompt = f"""
+     Generate a concise and informative summary of the results in a polite and easy-to-understand manner based on the question and the Cypher query response.
+     Question: {question}
+     Response: {str(result)}
+
+     Avoid repeating information.
+     If the response is empty, you should answer "Knowledge graph doesn't have enough information".
+     Answer:
+     """
+
+     sys_answer_prompt = [
+         SystemMessage(content=answer_prompt),
+         HumanMessage(content="Provide information about the question from the knowledge graph")
+     ]
+
+     response = gemini_chat.invoke(sys_answer_prompt)
+     answer = response.content
+     return answer
+
+
+ def lookup_kg(question: str) -> str:
+     """Based on the question, make and run Cypher statements.
+
+     question: str
+         Raw question from user input
+     """
+     cypher_statement = generate_cypher(question)
+     cypher_statement = cypher_statement.replace("cypher", "").replace("```", "").strip()
+     print(f"\nQuery:\n {cypher_statement}")
+
+     try:
+         answer = run_cypher(question, cypher_statement)
+     except Exception:
+         answer = "Knowledge graph doesn't have enough information\n"
+
+     return answer
+
+
+ if __name__ == "__main__":
+     question = "Are there any companies recruiting Machine Learning jobs?"
+
+     # Test few-shot template
+     # print(dynamic_prompt.format(question="What does the Software Engineer job usually require?"))
+
+     # # Test generate Cypher
+     # result = generate_cypher(question)
+
+     # # Test return information from Cypher
+     # final_result = run_cypher(result)
+     # print(final_result)
+
+     # Test lookup_kg tool
+     kg_info = lookup_kg(question)
+     print(kg_info)
Agent/tools/tavily_search_v2.py ADDED
@@ -0,0 +1,68 @@
+ import os
+ from dotenv import load_dotenv
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from tavily import TavilyClient
+ from langchain.tools import BaseTool, StructuredTool, tool
+
+ load_dotenv()
+
+ os.environ["TAVILY_API_KEY"] = os.getenv("TAVILY_API_KEY")
+ os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
+
+
+ def tavily_search(question: str) -> str:
+     """
+     Useful for when you need to search for relevant information, such as jobs and companies, on the web.
+     """
+     search_prompt = f"""
+     Respond to the user question by searching job descriptions, including job titles, companies, required skills, education, etc. related to job recruitment posts in Vietnam.
+
+     Query: {question}
+     """
+
+     tavily = TavilyClient(
+         api_key=os.environ["TAVILY_API_KEY"],
+     )
+
+     response = tavily.search(
+         query=question,
+         include_raw_content=True,
+         max_results=5
+     )
+
+     search_results = ""
+     for obj in response["results"]:
+         search_results += f"""
+         - Page content: {obj['raw_content']}
+           Source: {obj['url']}
+
+         """
+
+     print(search_results)
+
+     response_prompt = f"""
+     Generate a concise and informative summary of the results in a polite and easy-to-understand manner based on the question and the Tavily search results.
+     Return the URLs at the end of the summary for proof.
+
+     Question: {question}
+     Search Results:
+     {search_results}
+
+     Answer:
+     """
+
+     # Summarize the raw search results with Gemini and return the answer.
+     gemini_chat = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest")
+     return gemini_chat.invoke(response_prompt).content
+
+
+ def tavily_qna_search(question: str) -> str:
+     tavily = TavilyClient(
+         api_key=os.environ["TAVILY_API_KEY"],
+     )
+
+     response = tavily.qna_search(query=question)
+     return response
+
+
+ if __name__ == "__main__":
+     question = "Software Engineer job postings in Vietnam"
+
+     result = tavily_search(question)
+     print(result)
Agent/utils.py ADDED
@@ -0,0 +1,74 @@
+ import os
+ import yaml
+ from dotenv import load_dotenv
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_community.graphs import Neo4jGraph
+ from langchain_core.prompts.prompt import PromptTemplate
+ from langchain.chains import GraphCypherQAChain
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+
+
+ def config():
+     load_dotenv()
+
+     # Set up Neo4J & Gemini API
+     os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
+     os.environ["NEO4J_USERNAME"] = os.getenv("NEO4J_USERNAME")
+     os.environ["NEO4J_PASSWORD"] = os.getenv("NEO4J_PASSWORD")
+     os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
+
+
+ def load_prompt(filepath):
+     with open(filepath, "r") as file:
+         prompt = yaml.safe_load(file)
+
+     return prompt
+
+
+ def init_():
+     config()
+     graph = Neo4jGraph()
+     llm = ChatGoogleGenerativeAI(
+         model="gemini-1.5-flash-latest"
+     )
+
+     return graph, llm
+
+
+ def get_llm_response(query):
+     # Connect to Neo4J Knowledge Graph
+     knowledge_graph, llm_chat = init_()
+     cypher_prompt = load_prompt("Agent/prompts/cypher_prompt.yaml")
+     qa_prompt = load_prompt("Agent/prompts/qa_prompt.yaml")
+
+     CYPHER_GENERATION_PROMPT = PromptTemplate(**cypher_prompt)
+     QA_GENERATION_PROMPT = PromptTemplate(**qa_prompt)
+
+     chain = GraphCypherQAChain.from_llm(
+         llm_chat, graph=knowledge_graph, verbose=True,
+         cypher_prompt=CYPHER_GENERATION_PROMPT,
+         qa_prompt=QA_GENERATION_PROMPT
+     )
+
+     return chain.invoke({"query": query})["result"]
+
+
+ def llm_answer(message, history):
+     # history_langchain_format = []
+     #
+     # for human, ai in history:
+     #     history_langchain_format.append(HumanMessage(content=human))
+     #     history_langchain_format.append(AIMessage(content=ai))
+     #
+     # history_langchain_format.append(HumanMessage(content=message["text"]))
+
+     try:
+         response = get_llm_response(message["text"])
+     except Exception:
+         response = "Exception"
+     return response
+
+
+ # if __name__ == "__main__":
+ #     message = "Have any company recruiting jobs about Machine Learning and corresponding job titles?"
+ #     history = [("What's your name?", "My name is Gemini")]
+ #     resp = llm_answer(message, history)
+ #     print(resp)
Knowledge_Graph/.env DELETED
@@ -1,4 +0,0 @@
- NEO4J_URI=neo4j+s://7d728e56.databases.neo4j.io
- NEO4J_USERNAME=neo4j
- NEO4J_PASSWORD=v81MIwaDw3wd3NCcPMpHv4vDc9qAssCkVoYrf6Rk0a0
- GEMINI_API_KEY=AIzaSyDVjpl5kun36J_EdFsuLrwFsgLuPACKh4c
app.py ADDED
@@ -0,0 +1,38 @@
+ import chainlit as cl
+ from langchain.prompts import ChatPromptTemplate
+ from langchain.schema import StrOutputParser
+ from langchain.schema.runnable import Runnable
+ from langchain.schema.runnable.config import RunnableConfig
+ from react_agent_v2 import get_react_agent
+ from langchain.memory import ConversationBufferMemory
+ from langchain_community.chat_message_histories import ChatMessageHistory
+ from langchain_core.chat_history import BaseChatMessageHistory
+ from langchain_core.runnables.history import RunnableWithMessageHistory
+
+
+ @cl.on_chat_start
+ async def on_chat_start():
+     message_history = ChatMessageHistory()
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key="output",
+         chat_memory=message_history,
+         return_messages=True
+     )
+
+     agent_executor = get_react_agent(memory)
+     cl.user_session.set("runnable", agent_executor)
+
+
+ @cl.on_message
+ async def on_message(message: cl.Message):
+     llm_chain = cl.user_session.get("runnable")
+
+     response = llm_chain.invoke(
+         {"input": message.content}, callbacks=[cl.LangchainCallbackHandler()]
+     )
+
+     await cl.Message(response["output"].replace("`", "")).send()
react_agent_v2.py ADDED
@@ -0,0 +1,93 @@
+ from langchain.agents import Tool, AgentType, initialize_agent
+ from langchain.memory import ConversationBufferMemory
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain.agents import AgentExecutor
+ from langchain import hub
+ from langchain.agents.format_scratchpad import format_log_to_str
+ from langchain.agents.output_parsers import ReActSingleInputOutputParser
+ from langchain.tools.render import render_text_description
+ import os
+ from Agent.tools.kg_search import lookup_kg
+ from Agent.tools.tavily_search_v2 import tavily_search, tavily_qna_search
+
+ from dotenv import load_dotenv
+ from langchain_core.prompts import PromptTemplate
+
+ load_dotenv()
+ os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
+ llm = ChatGoogleGenerativeAI(
+     model="gemini-1.5-flash-latest",
+     temperature=0
+ )
+
+
+ kg_query = Tool(
+     name='Query Knowledge Graph',
+     func=lookup_kg,
+     description='Useful for when you need to answer questions about job posts.'
+ )
+
+ web_search = Tool(
+     name='Web Search',
+     func=tavily_qna_search,
+     description="Useful for when you need to search for external information."
+ )
+
+ tools = [kg_query, web_search]
+
+
+ with open("Agent/prompts/react_prompt_v2.txt", "r") as file:
+     react_template = file.read()
+
+ react_prompt = PromptTemplate(
+     input_variables=["tools", "tool_names", "input", "agent_scratchpad", "chat_history"],
+     template=react_template
+ )
+
+ prompt = react_prompt.partial(
+     tools=render_text_description(tools),
+     tool_names=", ".join([t.name for t in tools]),
+ )
+
+ llm_with_stop = llm.bind(stop=["\nObservation"])
+
+ agent = (
+     {
+         "input": lambda x: x["input"],
+         "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
+         "chat_history": lambda x: x["chat_history"],
+     }
+     | prompt
+     | llm_with_stop
+     | ReActSingleInputOutputParser()
+ )
+
+ memory = ConversationBufferMemory(memory_key="chat_history")
+
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)
+
+
+ def get_react_agent(memory):
+     agent_executor = AgentExecutor(
+         agent=agent,
+         tools=tools,
+         verbose=True,
+         memory=memory
+     )
+
+     return agent_executor
+
+
+ # if __name__ == "__main__":
+ #     while True:
+ #         try:
+ #             question = input("> ")
+ #             result = agent_executor.invoke({
+ #                 "input": question
+ #             })
+ #         except:
+ #             break
requirements.txt CHANGED
@@ -10,3 +10,11 @@ instructor
  langchain_community
  google.generativeai
  neo4j
+ langchain
+ langchain-community
+ langchain-google-genai
+ langchain-core
+ faiss-cpu
+ langchainhub
+ tavily-python
+ chainlit