Spaces:
Shahid committed
Commit af30a30
1 Parent(s): eae1335
Added first commit
Browse files
- .DS_Store +0 -0
- app.py +345 -0
- cleaned_output.txt +340 -0
- config/__init__.py +0 -0
- config/api_config.py +0 -0
- data/output/tasks_Week_1.json +72 -0
- data_ingestion/.DS_Store +0 -0
- data_ingestion/__init__.py +0 -0
- data_ingestion/ingest_data.py +14 -0
- data_ingestion/preprocess_data.py +15 -0
- data_ingestion/sample_data/Day-1 Transcript.docx +0 -0
- data_ingestion/sample_data/Day-2 Transcript.docx +0 -0
- database/__init__.py +0 -0
- database/models.py +0 -0
- database/mongo_integration.py +58 -0
- database/setup_db.py +14 -0
- llm_integration/__init__.py +0 -0
- llm_integration/task_comparison.py +115 -0
- llm_integration/task_extraction.py +82 -0
- poetry.lock +0 -0
- pyproject.toml +20 -0
- requirements.txt +0 -0
- test.ipynb +1303 -0
- test.py +26 -0
.DS_Store
ADDED
Binary file (6.15 kB). View file
app.py
ADDED
@@ -0,0 +1,345 @@
# app.py
import streamlit as st
from st_aggrid import AgGrid, GridOptionsBuilder
import pandas as pd
import os, json
from datetime import datetime
from data_ingestion.ingest_data import read_document
from data_ingestion.preprocess_data import preprocess_text
from llm_integration.task_extraction import extract_tasks_from_text
from llm_integration.task_comparison import compare_task_data
from database.mongo_integration import *
from notion_client import Client
from pymongo import MongoClient, DESCENDING
# from database.mongo_integration import save_tasks_to_mongo

os.makedirs("data/output", exist_ok=True)

mongo_client = MongoClient("mongodb+srv://shahid:Protondev%[email protected]/")  # Replace with your MongoDB URI
db = mongo_client["task_management"]
employee_project_collection = db["employee_project"]

notion = Client(auth="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")
parent_page_id = "148b2f92b9948099a854e8b21a0640a3"
notion_database_id = "14db2f92-b994-81fb-9132-f4e4cb46ac13"

def fetch_latest_task_entry():
    """
    Fetch the most recent entry from MongoDB.

    Returns:
        dict: The latest task entry as a dictionary.
    """
    latest_entry = employee_project_collection.find_one(sort=[("created_at", DESCENDING)])
    if latest_entry:
        return latest_entry
    else:
        raise ValueError("No entries found in MongoDB.")


def push_to_notion(latest_entry):
    """
    Push tasks from the latest entry to the Notion database.

    Args:
        latest_entry (dict): The most recent task data from MongoDB.
    """
    # Extract the tasks from the JSON
    tasks = latest_entry.get("consolidated_final_task", {})
    created_at = latest_entry.get("created_at", None)

    # Step 1: Archive existing tasks in the Notion database
    with st.spinner("Archiving existing tasks in Notion..."):
        try:
            # Query all pages in the Notion database (this fetches the existing tasks)
            notion_database = notion.databases.query(database_id=notion_database_id)

            # Loop through the database pages and archive them
            for page in notion_database['results']:
                notion.pages.update(page_id=page['id'], archived=True)
            st.info("Old tasks archived in Notion successfully.")
        except Exception as e:
            st.error(f"Failed to archive tasks in Notion: {e}")

    # Step 2: Push new tasks to Notion
    with st.spinner("Pushing new tasks to Notion..."):
        try:
            # Iterate over projects and their tasks
            for project_name, task_list in tasks.items():
                for task_id, task_details in task_list.items():
                    # Map MongoDB fields to Notion properties
                    notion_task = {
                        "parent": {"database_id": notion_database_id},
                        "properties": {
                            "Project Name": {"title": [{"type": "text", "text": {"content": project_name}}]},
                            "Task ID": {"rich_text": [{"type": "text", "text": {"content": task_id}}]},
                            "Description": {"rich_text": [{"type": "text", "text": {"content": task_details.get("description", "")}}]},
                            "Priority": {"select": {"name": task_details.get("priority", "low")}},
                            "Assigned To": {"rich_text": [{"type": "text", "text": {"content": task_details.get("assigned_to", "")}}]},  # Updated to rich_text
                            "Current Status": {"select": {"name": task_details.get("current_status", "pending")}},
                            "Created At": {"date": {"start": created_at.isoformat() if created_at else datetime.utcnow().isoformat()}}
                        }
                    }

                    # Push each task to Notion
                    response = notion.pages.create(**notion_task)
                    print(f"Task pushed to Notion: {response['id']}")
            st.success("New tasks pushed to Notion successfully!")
        except Exception as e:
            st.error(f"Failed to push tasks to Notion: {e}")


def json_to_dataframe(json_data):
    """
    Converts a nested JSON structure into a user-friendly Pandas DataFrame for display.

    Args:
        json_data (dict): The JSON object containing projects and tasks.

    Returns:
        pd.DataFrame: A DataFrame representing the JSON data.
    """
    data = []
    for project_name, tasks in json_data.items():
        for task_id, task_details in tasks.items():
            data.append({
                "Project": project_name,
                "Task Name": task_id,
                "Description": task_details["description"],
                "Priority": task_details["priority"],
                "Assigned To": task_details["assigned_to"],
                "Status": task_details["current_status"]
            })

    return pd.DataFrame(data)

def dataframe_to_json(df):
    """
    Converts a Pandas DataFrame back into a nested JSON structure.

    Args:
        df (pd.DataFrame): The DataFrame containing projects and tasks.

    Returns:
        dict: A nested dictionary representing the original JSON data.
    """
    json_data = {}

    # Iterate over each row of the DataFrame
    for _, row in df.iterrows():
        project_name = row['Project']
        task_id = row['Task Name']

        # Ensure the project exists in the JSON structure
        if project_name not in json_data:
            json_data[project_name] = {}

        # Add or update the task under the corresponding project
        json_data[project_name][task_id] = {
            "description": row['Description'],
            "priority": row['Priority'],
            "assigned_to": row['Assigned To'],
            "current_status": row['Status']
        }

    return json_data

# Function to fetch the most recent tasks from Notion
def fetch_recent_tasks_from_notion():
    """
    Fetch the most recent tasks from the Notion database and return them as a list of dicts.
    """
    try:
        # Query the database to get the most recent tasks
        query_response = notion.databases.query(
            **{
                "database_id": notion_database_id,
                "sorts": [{"property": "Created At", "direction": "descending"}],
                "page_size": 20  # Fetch the 20 most recent tasks; adjust the page size as needed
            }
        )

        # Extract tasks from the query response
        tasks = []
        for result in query_response.get("results", []):
            task_data = {
                "Project Name": result["properties"]["Project Name"]["title"][0]["text"]["content"],
                "Task ID": result["properties"]["Task ID"]["rich_text"][0]["text"]["content"],
                "Description": result["properties"]["Description"]["rich_text"][0]["text"]["content"],
                "Priority": result["properties"]["Priority"]["select"]["name"],
                "Assigned To": result["properties"]["Assigned To"]["rich_text"][0]["text"]["content"] if result["properties"]["Assigned To"]["rich_text"] else "",
                "Current Status": result["properties"]["Current Status"]["select"]["name"],
                "Created At": result["properties"]["Created At"]["date"]["start"]
            }
            tasks.append(task_data)

        return tasks

    except Exception as e:
        print(f"Error fetching tasks from Notion: {e}")
        return []

# Function to display recent tasks in a DataFrame on the dashboard
def display_recent_tasks_on_dashboard():
    """
    Fetch and display the most recent tasks from Notion in a DataFrame on the Streamlit dashboard.
    """
    tasks = fetch_recent_tasks_from_notion()

    if tasks:
        # Convert tasks into a DataFrame
        df = pd.DataFrame(tasks)

        # Display DataFrame in Streamlit
        st.subheader("Most Recent Tasks from Notion")
        st.dataframe(df)
    else:
        st.write("No tasks found in the Notion database.")

# Initialize Streamlit app
st.set_page_config(
    page_title="Task Management",
    page_icon="📋",
    layout="wide"
)

# Define session state for managing intermediate data
if "processed_tasks" not in st.session_state:
    st.session_state.processed_tasks = None

if "edited_df" not in st.session_state:
    st.session_state.edited_df = None

if "comparison_results" not in st.session_state:
    st.session_state.comparison_results = None

tab1, tab2, tab3 = st.tabs(["Dashboard", "Upload and Process", "Review Updated Tasks"])

# Initialize session state for tab navigation
if "active_tab" not in st.session_state:
    st.session_state.active_tab = 0

# Function to switch tabs
def switch_tab(tab_index):
    st.session_state.active_tab = tab_index

# -------------------------------
# Tab 1: Dashboard
# -------------------------------
with tab1:
    with st.container():
        st.title("📋 Task Management Dashboard")
        # Display recent tasks from Notion in a DataFrame
        display_recent_tasks_on_dashboard()

        # # Quick actions
        # st.subheader("Quick Actions")
        # if st.button("Upload and Process New Tasks"):
        #     switch_tab(1)
        # if st.button("Review and Approve Tasks"):
        #     switch_tab(2)

# -------------------------------
# Tab 2: Upload and Process
# -------------------------------
with tab2:
    with st.container():
        st.title("📤 Upload and Process Tasks")

        uploaded_file = st.file_uploader("Upload a .docx file", type=["docx"])

        if uploaded_file is not None:
            with st.spinner("Processing uploaded file..."):
                # Step 1: Extract cleaned text
                raw_data = read_document(uploaded_file)
                cleaned_text = preprocess_text(raw_data)
                cleaned_text = "\n".join([f"{entry['author']}: {entry['text']}" for entry in cleaned_text])

            # Step 2: Extract tasks
            if 'df' not in st.session_state:
                extracted_tasks = extract_tasks_from_text(cleaned_text)

                st.subheader("Processed Tasks (DataFrame View)")
                st.session_state.df = json_to_dataframe(extracted_tasks)

            # Display the DataFrame for editing
            edited_df = st.data_editor(st.session_state.df)
            st.session_state.edited_df = edited_df

            edited_extracted_tasks_json = dataframe_to_json(edited_df)
            st.session_state.processed_tasks = edited_extracted_tasks_json
            st.success("Tasks extracted successfully!")

            # Step 3: Push extracted tasks to MongoDB
            if st.button("Save tasks & Compare"):
                with st.spinner("Saving tasks to MongoDB..."):
                    try:
                        insert_weekly_task_data(edited_extracted_tasks_json)
                        st.success("Tasks successfully saved to the database!")
                    except Exception as e:
                        st.error(f"Failed to save tasks to the database: {e}")

                if 'df' in st.session_state:
                    del st.session_state['df']
                    st.info("Temporary data removed from session state.")

                # Step 4: Run comparison
                with st.spinner("Running task comparison..."):
                    st.write("Running task comparison...")
                    recent_entries = fetch_recent_two_entries()
                    latest_entry = fetch_latest_task_entry()
                    if len(recent_entries) >= 2:
                        old_tasks = latest_entry.get("consolidated_final_task", {})
                        new_tasks = recent_entries[0]["tasks"]
                        comparison_results = compare_task_data(old_tasks, new_tasks)
                        st.session_state.comparison_results = comparison_results
                        st.success("Task comparison completed! Please move to the Review section.")
                    else:
                        st.warning("Not enough data to run comparison.")

# Tab 3: Review and Approve Tasks
with tab3:
    st.title("🔍 Review and Approve Tasks")

    if st.session_state.comparison_results is None:
        st.warning("No comparison results available. Please upload and process tasks first.")
    else:
        # Display comparison results
        if st.session_state.comparison_results:
            # st.subheader("Comparison Results (DataFrame View)")
            if "compared_df" not in st.session_state:
                st.session_state.compared_df = json_to_dataframe(st.session_state.comparison_results)

            # st.dataframe(st.session_state.compared_df)

            # Inline editing of tasks
            st.subheader("Edit Tasks")
            final_edited_df = st.data_editor(st.session_state.compared_df)
            st.session_state.final_edited_df = final_edited_df

            final_extracted_tasks_json = dataframe_to_json(final_edited_df)

            # Approval and finalization
            if st.button("Approve and Finalize Tasks"):
                with st.spinner("Finalizing tasks..."):
                    try:
                        db = get_database()
                        updated_collection = db["employee_project"]
                        document = {
                            "consolidated_final_task": final_extracted_tasks_json,
                            "created_at": datetime.now()
                        }
                        updated_collection.insert_one(document)
                        st.success("Finalized tasks saved successfully!")
                    except Exception as e:
                        st.error(f"Failed to save tasks: {e}")

            if st.button("Push to Notion Dashboard"):
                with st.spinner("Pushing to Notion..."):
                    latest_entry = fetch_latest_task_entry()
                    push_to_notion(latest_entry)
                    st.success("Notion Dashboard has been updated")
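The pair json_to_dataframe / dataframe_to_json above is meant to be lossless for well-formed task data. A minimal round-trip sketch follows; the two functions are inlined so the check runs without launching the Streamlit app, and the sample project and task values are invented:

# round_trip_check.py -- illustrative sketch, not part of the commit
import pandas as pd

def json_to_dataframe(json_data):
    # Same logic as app.py: flatten {project: {task: fields}} into rows
    rows = []
    for project, tasks in json_data.items():
        for task_id, d in tasks.items():
            rows.append({"Project": project, "Task Name": task_id,
                         "Description": d["description"], "Priority": d["priority"],
                         "Assigned To": d["assigned_to"], "Status": d["current_status"]})
    return pd.DataFrame(rows)

def dataframe_to_json(df):
    # Same logic as app.py: rebuild the nested dict from the edited rows
    out = {}
    for _, row in df.iterrows():
        out.setdefault(row["Project"], {})[row["Task Name"]] = {
            "description": row["Description"], "priority": row["Priority"],
            "assigned_to": row["Assigned To"], "current_status": row["Status"]}
    return out

sample = {"Demo Project": {"Task-1": {"description": "Write the first draft",
                                      "priority": "high",
                                      "assigned_to": "Shahid S",
                                      "current_status": "in progress"}}}

assert dataframe_to_json(json_to_dataframe(sample)) == sample
print("round trip OK")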
cleaned_output.txt
ADDED
@@ -0,0 +1,340 @@
Shahid S: Hi! Vick! Hello!
Vivek: Hey, Shahid?
Vivek: Okay, I made it on time. I didn't expect
Vivek: traffic was easy. Okay.
Vivek: So
Vivek: yeah, let's talk about
Vivek: yeah, we don't necessarily have to. Okay. I saw your. I was
Vivek: reading your message. You were saying that you don't have anything to say. Say.
Vivek: Oh.
Vivek: but okay.
Shahid S: The projects. Like, what I was thinking about the project. We can talk on that if you want.
Shahid S: So yeah.
Vivek: Which project.
Shahid S: I mean the chat tool.
Shahid S: the task manager role.
Vivek: Okay. But but before we get there you have all the list of things which I have given you right? We have like lot of things we we are going on. We have going on. So I need you to have that. And I would like you to lead, and you know, bring that back to me, because
Vivek: I'm already occupied with lot of other things. So I would like you to take lead on all of those things and make sure that you are on top of that, and also.
Vivek: make that as a priority, like how not like a full solution.
Vivek: a sort of solution where where we both are.
Vivek: on the same page, so that we don't want to get into that, Miss Confusion, or, you know, miscommunication. Where
Vivek: of of
Vivek: I assume that. Hey? That was priority
Vivek: and that did not get delivered on time or things like that. So for that reason have that like how you want me.
Vivek: how you want to keep me posted so that I know that you are on top of everything.
Vivek: because, that's the 1st thing we want to get out of our way, because that has caused a lot of friction
Vivek: and
Vivek: past couple of weeks. It was not in the beginning. Because
Vivek: but yeah, we are. We are getting better. It. It will get better. But we need to have some sort of communication
Vivek: method and process, so that
Vivek: we both are on top of everything, and also
Vivek: I know what you're doing.
Vivek: and you know what's the most important thing, and if you have any questions blockers, you will directly let me know.
Vivek: and we will also try to optimize our time when we connect so that you know
Vivek: we we, we are getting most thing done. It's not like we have to get on call every time for everything. So
Vivek: with that go ahead.
Shahid S: Yeah. So basically, our priority right now, bony plants is bony plants is done.
Shahid S: The next one comes is the upload thing right? Washington government project.
Shahid S: So it's I've sent it for the review and all those questions. I've sent it to the upload team, and also
Shahid S: along with that, I have to also talk about that new Rtf. S. 3 r. 3. Alarm.
Shahid S: On it. Right? Yeah. So I want, I've asked for their input. Once they are ready on that, we'll discuss about it, and we'll get back to you regarding what our solution or what their solution looks like. I went through it.
Vivek: Come, but
Vivek: sounds great. Sounds great. One thing, I will add. Just make sure to send a reminder every day like, Hey, what's going on what's up. You know that like that, you just cannot wait for them to come back to you or come back to us. Because.
Vivek: yeah, it's a human nature, you know, people forget. So you have to remind them.
Vivek: So just do that task. Okay, okay.
Shahid S: Yeah, I'll make sure that this applore review thing gets done. Washington State Government Review get done. Is the volume we have still 2 days. Right? So yeah.
Vivek: Yeah.
Shahid S: And after that we'll work with this s. 3 r, 3, Rpf solution. Regarding this 3 website, redesigning marketing and research strategy
Shahid S: on those 3. We'll talk about it
Shahid S: uploading
Shahid S: contact.
Vivek: Yeah. Have you looked at the deadline? I think it's sometime in November, right.
Shahid S: Yeah.
Vivek: Okay, so have a a high level roadmap key. By 15th of November we need to have 1st draft
Vivek: by 20.th We need to have second draft, and by 30th we need to have like so create a milestone like based on that and have a high level idea. Give them input
Vivek: act as if you are senior. Otherwise you know and lead that when you lead you will, you will be able to delegate lot of lot of the work, so
Vivek: make sure that you do that, and then that way we'll be able to get things done, and I'll meanwhile, have they responded to your s. 3. Request yet.
Shahid S: No, they have not responded to both of the request yet. I'll make sure to. I'll just remind them again tomorrow.
Shahid S: Yeah.
Vivek: Okay, so do that because I will. If not, then I will also message from my side
Vivek: the CEO of Babel.
Vivek: A
Vivek: yeah, they. I'll message him as well, and but you do from your party, because you are on slack channel right?
Vivek: And for some reason their slack channel doesn't show doesn't integrate with my slack channel. So I have to go on a separate link to see that, or how are you able to integrate that together?
Shahid S: Yeah, I mean, yes, I was able to get the slack into our
Shahid S: or space.
Vivek: Okay, I'll we'll figure that out later. Okay, so what's next?
Shahid S: That being said, yeah, we I was just working on that. Our use cases for Grant engine what we did, how to find it, together with that, our rag solution or rag block that we are writing.
Shahid S: I think I might get something. But yeah, I'm writing on that again. I'm working to have a look
Shahid S: what we did for granting, and how to bind it together with this, our brand
Shahid S: exactly.
Vivek: Okay, so let me clarify. There rag is totally separate.
Vivek: G. Copilot. We are writing case study just like we wrote for the pony plant.
Vivek: and then don't overthink like whenever whatever comes to your mind. So this is how the writing works. This is coaching going on for writing.
Vivek: I'm not a writer myself.
Vivek: How do you do it? You create a 1st draft. Send it to me. I'll take a pass, and then I'll give you a feedback. Okay?
Vivek: And then I will add something to it, and then and then also give you feedback, like, what are the things you can do, and then you revise just like, how did Bonnie Plant happen like? Did we see that it will turn out into a good case study?
Vivek: It was like so much broken pieces were there right? We refined it so many times that it became a very coherent story. So
Vivek: don't worry about perfection on the 1st quote itself, create so and drag and just to be just to clarify rag, article blog, and G is separate. But of course, like you can take some of the stuff we did
Vivek: 4G pilot, or we could have done to explain. Rag.
Vivek: Okay, yeah.
Vivek: So this thing, I think you can share it with me like this. I would have expected G. Copilot 1st draft and as well as drag. 1st stop, because I gave you feedback on drag as well as G. Copilot. Both
Vivek: that what? What are the things we need to.
Shahid S: That's the same document. Actually, if you can leave feedback, that would be great whatever it is, and if the content looks great, then I'll start adding a another content as well. I have started creating it.
Shahid S: But yeah, I will just put this into the document. If that looks fine.
Shahid S: the outline looks fine.
Vivek: Okay.
Shahid S: So that being said, we have to.
Vivek: Yeah.
Shahid S: One is
Shahid S: another is Zico valid case study.
Shahid S: I'll send you once again, so that you can.
Vivek: Right.
Shahid S: I'll start adding the contents tomorrow as well.
Shahid S: Right oops.
Shahid S: And after that we have another 2 R&D and Pocs.
Shahid S: That is number one is our internal Lm comparison, tool
Shahid S: and number 2 is our
Shahid S: task manager.
Shahid S: From our communication.
Shahid S: These 2 aren't plugins. We are working.
Shahid S: It doesn't.
Vivek: Okay.
Vivek: okay, okay, so you will have to put things in order. I know, like, this is, like, we have to arrange this thing in order, so that and I think once we have that
Vivek: Task manager kind of thing, we don't have to wait for that just we can start with simple.
Vivek: a calm girl there made a simple idea. Right? Summary.
Vivek: So was call Case summary. Go after yesterday's call
Vivek: key. We have to put it in a specific format.
Vivek: Abhi, watch for that. Then let's let's just do a simple thing.
Vivek: He
Vivek: Whatever we are discussing on the call, we will. I will
Vivek: take the summary of this and post it. Okay?
Vivek: And then you try to. Or let's say data, how, let's say, just like
Vivek: which Api use the connect. Which means how it is doing that. Okay?
Vivek: And then and then we will see, like, key we can integrate our Google drive. We can integrate our
Vivek: email and slack as well.
Vivek: Okay, so let's let's start here. Let's let's keep it safe.
Shahid S: Yeah, that would be great. You can send me the summary after this call. I'll try to have a look without any more customization or.
Vivek: Right.
Shahid S: How this transcript looks like.
Vivek: Yeah, we we can start simple with
Shahid S: If you can get idea to how it is capable of doing something, then we can think about adding some extra factors or layers.
Vivek: Right, right
Vivek: I might have
Vivek: in future. I feel like Ui will be very, very important. People will differentiate AI engineering
Vivek: Hoja. I got your software engineering
Vivek: like in you need some sort of engineering skill and also business skill. And also like creativity is Ui, that that's gonna drive the
Vivek: that's going to be a a big
Vivek: big capability.
Vivek: So even before, like parallel team, success criteria join a team level, of course, scalable, fast efficient.
Vivek: Then, ui!
Vivek: And then
Vivek: is it
Vivek: making our life easy?
Vivek: Matter? Is it even worth
Vivek: building, you know. So what is simple? I'll send you the summary. And let's see, I'll I'll let you go. Creative, go creative on that.
Shahid S: Yeah for chat. I think we we can do that. Once we get a summary, then let's check on directly on chat security. How it performs. Then we can build something on top of that
Shahid S: moving to the next one. That is our Llm. Comparison field.
Shahid S: I was thinking about something, how we can do that. And initial goal. Was this right? Let's suppose a client comes in.
Shahid S: He puts his business problem and his key metrics, key performance metrics into it.
Shahid S: and he get a comparison about 2 elements which one performs better right?
Shahid S: So.
Vivek: There's so many.
Shahid S: Thank you. Let's define some of the metrics itself. Give any true data we can get from Internet like accuracy. Let's suppose I'm comparing 2 models that is, chat and cloud sonnet model, right.
Vivek: Right.
Shahid S: Various parameters or metrics, performance metrics like accuracy, response, time
Shahid S: and its cost, scalability, security. All these things right.
Vivek: Right, right.
Shahid S: After. Yeah, all these key metrics. So let's build something. He will include all his problem statement.
Shahid S: And we give that problem statement to another Llm.
Shahid S: And it will do what you will say. Better standard format in a better format. I am a normal user, right? Business problem that if I give it to better.
Shahid S: So we'll provide an Llm. Choose a better format, Melik, and what we'll do. We'll send this better format writing of problem statement to 2 different elements. Okay, chat Gpt and clouds on it and ask it to write how we're going to solve the problem.
Shahid S: Give a problem statement, a mirror, it work as a solution.
Shahid S: so don't know if it.
Shahid S: and then we'll have a kind of a judge there.
Shahid S: depending on which solution looks much better.
Shahid S: Given that key performance. Metrics of the user or list is this thing
Shahid S: and Joe solution, donor model is generate care.
Shahid S: So what kind of performance indicators you were better.
Shahid S: Discuss solution, better. Lebra.
Vivek: Okay. So I get it. But I'm little bit usual because it gets it gets technical here. So
Vivek: have you used figma or figma around Tumnevo research here.
Vivek: Tumjitna technical and the level you are going. I'm not. I don't have time. And also
Vivek: like, I cannot like I want to understand that. And the gap can be filled
Vivek: if you learn to or figma, both easier to make working prototype.
Vivek: So free version, maybe they offer quite a lot of things. So to explain this kind of technical thing or maybe that will be really helpful.
Vivek: And then we don't have to think I I like that. I followed you till there where you were.
Vivek: Do model comparison.
Vivek: That's a good question.
Vivek: a requirement, non technical requirement of numbers, business point of view, Sathika or up
Vivek: security picture efficiency, scalability.
Vivek: accuracy which I get.
Vivek: And and within security we will further classify Yoga Joki Apneko. This is this is where we will stand out security
Vivek: in Llm. Or aim. Para Rowland.
Vivek: this is going to play a big role, and this is where people have
Vivek: lot of fear. So
Vivek: we will ask those questions. Security? I don't know. I would like you to think on that.
Vivek: and then ask those questions which will be important for the Llm.
Vivek: From security point of view.
Vivek: And then
Vivek: Bfsi Walle
Vivek: models right
Vivek: or more the exceptional case. But he's trying to solve
Vivek: customer of cable. Tam.
Vivek: Hmm
Vivek: concept problem solver. Let's say customer service problem solver. Right? So Bfsi
Vivek: come, model will be helpful, but we will see Key by the customer service care on Con. That's a very edge case, and that's very exceptional. But
Vivek: straight away I'm filter out cutting it based on the industry. Right?
Vivek: So I don't know. I talked a lot here, but.
Shahid S: No, no, that's so. How we can do that. Again. We have 50 models then, based on the industry. What we are working at domain of that problem statement, we can filter 1st layer filtering
Shahid S: out of 15. We are getting 5 elements.
Shahid S: then on that problem statement.
Shahid S: joby solution, generate care. 5 different models. Next, as a judge, we will use another Llm. As a judge who say, Judge, which which solution is better
Shahid S: given, that performance indicators that client is.
Vivek: Hmm! Well.
Shahid S: Asking about it, which solution seems better to the client
Shahid S: I feel better in compare in, given that these are the 3 performance.
Vivek: One second.
Vivek: right.
Shahid S: How did model? Actually, because, Abi, in a space, if you see
Shahid S: the this evolution? AI evolution, this is also very technical.
Vivek: No, you continue. But I want to make sure. So let's say, 50 models, 1, 2, 50, okay, 1, 2, 3, 4, 5 models. Where
Vivek: 3rd model, Yoga
Vivek: which will be from 1, 2, 3, 4, 5, or it will be from 6 to 58th model.
Shahid S: It will be from, are we if we are comparing Chat Gpt. 4 and cloud sonnet Cloud version. Then, as an I'll be also keeping this chat. Gpt. 4. Because voice of service better modular.
Vivek: That will be biased right?
Shahid S: In that case we need to check you with.
Shahid S: Solution right? And other solution, because Llm doesn't.
Vivek: So, okay, so this is where we need to be very smart and we can brainstorm.
Vivek: So ye, this is where my business point of view is also wrong. My Ir. We are
Vivek: the Bulroki Apni family mesibilia here.
Vivek: there. There is like competition between 2 families, and then this one family is deciding which one is better, they will obviously tell, like, Hey or chatgpt pay
Vivek: to grog. I'm I'm bad at making decision, and which one is the best.
Vivek: No, it doesn't tell.
Vivek: He doesn't tell that.
Shahid S: That chat. Gpt is best time with, Yeah.
Vivek: We have to come up with we. So this is where we have to come up with. Can we have some sort of
Vivek: metric which we decide kipash. I'm gonna select earlier
Vivek: Abba
Vivek: of hum decision, later, based on human as a human, we have some evaluation criteria.
Vivek: If partial model go home, because data or partial model
Vivek: say, hum, decide, parent, not any 3rd party, Llm. Maybe. 1st round preliminary round. 3rd party.
Vivek: 3rd party led and say feedback.
Vivek: But at the end we are making decision because that will be crucial, or also make make sure he bother the expensive product. We don't have that much resources. So think from that point of view as well.
Vivek: But here, what do you think about that? Do you think? Can we do something
Vivek: along that line? How I'm thinking.
Shahid S: Now we can do that because I have given a thought. It's my whole job.
Shahid S: We are having some parameters and some metrics right to just something
Shahid S: toggle business solution there are.
Shahid S: And given that these parameters we can rate it to fit. We can do it right. Click on. Better compare, let's say, accuracy for for Bfsi problem statement, this accuracy is other cloud sonnet. Accuracy is other compared to Gpt 4
Shahid S: and response. Time is also faster. Then user will definitely go for Cloud Summit. Right? Cloud Summit
Shahid S: sped. There you go.
Shahid S: These are the performance. We'll give a report. Then it comes to at the end. It's the decision is up to the user, right client capacity, decision.
Vivek: Okay.
Shahid S: And this is all you wanted. And this is how this model perform on this type of metrics. Given your problems right?
Vivek: Okay, got it? Got it?
Vivek: Okay.
Vivek: sounds good to me. So
Vivek: think along that line.
Vivek: And yeah, let's start executing
Vivek: allocate your time. Accordingly, you know 2 h for development, 1 h for writing, and you have to like, sit down and block your time.
Vivek: There will be run time and then block that time. Be this time.
Vivek: And that's why I was talking about. It's not a gesture. It's important. Once you have your work, text location and chair.
Vivek: So that will create the separation. Be okay. You hop in a bad round. So now I'm professionally doing things.
Vivek: and it's also important for
Vivek: us when we are going to with client meeting. These are big meetings.
Vivek: It's very important for us to, you know, be very professional, so it's not
Vivek: any. I mean, it's also a gesture, but also something which is really, really important. So
Vivek: don't take this anything outside context. This is very
Vivek: I'm thinking, from productivity. Point of view, I'm thinking, from professionally professional point of view.
Vivek: And I'm thinking for for your growth, my growth, and for companies growth.
Vivek: So there's nothing more than that.
Vivek: So just
Vivek: and yeah, we will. We'll talk about this later. But that was my thought process. Anything else.
Shahid S: No, that's it, and just Preeta's also. Push the codes into the Github.
Shahid S: I will have a look and let you know about it.
Vivek: Okay?
Vivek: So I have like, bunch of ideas on
Vivek: ui, or website, pay to be honest after
Vivek: feedback from any open source or a
Vivek: which is fine.
Vivek: But that's
Vivek: something I want to have like, minimal website should be simple. Tell what we do. And then
Vivek: Blogs was done in, so
Vivek: I don't know anything else you need from him, from ui side
Vivek: ui help like.
Vivek: is there any
Vivek: square space
Vivek: or wix? It allows you to do drag and drop ui, you know.
Vivek: So that's something I will go for
Vivek: ideally. But we we did that. And then that time I was
Vivek: thinking that Kamlesh will lead
Vivek: but I feel like experience, wise and vision wise not matching
Vivek: I want to go fast, and I want to go like
Vivek: big
Vivek: and for that reason, like.
Vivek: I'm changing all those things.
Vivek: yeah, I know you. Tomorrow Ui may tomorrow experience you. That's not a problem. And that shouldn't be a problem right
Vivek: ui, to have
Vivek: that can be plugged in
Vivek: ui solution.
Shahid S: And you get multiple sources for this year. Right? Basically website.
Shahid S: you can just drag and drop elements and make a good ui around them.
Vivek: Yeah.
Vivek: Oh, you see.
Shahid S: Efforts, neither.
Vivek: And you use python to do python we have last time we copy basic. Right?
Vivek: Yeah.
Vivek: Hmm.
Vivek: okay. So we'll we'll talk about that later. Anything else. Shahid.
Shahid S: No, that's it. That's it for you.
Vivek: Okay, what are the things we talked? How was different? It will be hard for me as well as you.
Vivek: So that's something we need to figure it out right. It doesn't have to be like best solution. It's just simple sheet, maybe a more tracker, right?
Vivek: So that will be good.
Vivek: So that
Vivek: yeah. So basically hect.
Vivek: we are creating AI assistant for both of us or for the company. Once we grow
Vivek: monthly. Ka quarter ka, long term, short term vision.
Vivek: or it's evolved right? It's it's always evolving.
Vivek: Oh.
Vivek: yeah, which was which became top of the priority. So it will shift.
Vivek: or that itself creates.
Vivek: This is like lot of work.
Vivek: So if we can have a system which is already shuffling, and you know, reordering thing, this will make our lives so easy
Vivek: that
Vivek: yeah, let's try it. This can be a
Vivek: yeah. Let's let's try it.
Shahid S: Yeah, that's right. The value of task defined, or is the timeline?
Shahid S: Then we can think about.
Vivek: Timeline. I don't think
Vivek: AI or chat. Gpt application itself will do the timeline up. Nego could could sort of
Vivek: indicator create
Vivek: or whether there is, there has to be some level of templatization at some level of process or structure or not. Here.
Vivek: Okay, to do list.
Vivek: Okay, let's start. I mean, if we'll get too ahead, then it will get complicated, and then we will not take action. So let's start somewhere, and then we will take it from there. Okay, so just do your give your best shot.
Vivek: and then we will refine it.
Vivek: I have, like.
Vivek: decent
Vivek: idea, key product to end product.
Vivek: But execution challenge Mojij we have
Vivek: for any, or and I think you cannot also envision, because you don't exactly know how I'm thinking right now.
Vivek: But do you have the basic idea? Give what I'm aiming for.
Shahid S: Let's start it where we can iterate on that the cacket changes. Come, and what
Shahid S: towards that? If you are thinking about it, and I'll share it in Joby. All the development. I'll keep sharing with you so that you can give feedback and we can work on it.
Vivek: Okay.
Vivek: Alright, Shahid, that's awesome travel.
Shahid S: I'll be traveling on 4th or 5, th 5th to 6, th actually.
Vivek: Okay.
Vivek: so to my to family that
Vivek: didn't English would teach chateau.
Vivek: Do you want break.
Shahid S: I will ask if I need great. But yeah, I'll be traveling, but I can work on that time.
Vivek: Okay. Cool to my family is the time spent. If you need break
Vivek: you let me know, Mujabas. Advance me, Mujab Adina.
Vivek: or so let me know. Keep me keep me posted.
Vivek: And then, yeah, spend time with family, and things will be slowing down.
Vivek: Take break, and then we will.
Vivek: Yeah. Keep you posted.
Shahid S: Alright!
Vivek: Alright, shay, thank you so much. I'll talk to you later. Bye.
Shahid S: Okay? Bye.
config/__init__.py
ADDED
File without changes
config/api_config.py
ADDED
File without changes
data/output/tasks_Week_1.json
ADDED
@@ -0,0 +1,72 @@
{
    "Bony Plants": {
        "Task-1": {
            "description": "Complete the Bony Plants project.",
            "priority": "high",
            "assigned_to": "Shahid S",
            "current_status": "completed"
        }
    },
    "Washington State Government Project": {
        "Task-1": {
            "description": "Submit the project for review to the Applore team.",
            "priority": "high",
            "assigned_to": "Shahid S",
            "current_status": "in progress"
        },
        "Task-2": {
            "description": "Ensure reminders are sent daily to follow up on the review.",
            "priority": "high",
            "assigned_to": "Shahid S",
            "current_status": "pending"
        }
    },
    "S3 R3 Project": {
        "Task-1": {
            "description": "Discuss RTF S3 R3 alarm with the team once feedback is received.",
            "priority": "medium",
            "assigned_to": "Shahid S",
            "current_status": "pending"
        }
    },
    "Website Redesign and Marketing Strategy": {
        "Task-1": {
            "description": "Create a high-level roadmap and plan milestones for the project.",
            "priority": "medium",
            "assigned_to": "Shahid S",
            "current_status": "in progress"
        }
    },
    "Grant Engine and RAG Solution": {
        "Task-1": {
            "description": "Draft the use cases document for the Grant Engine and RAG Solution.",
            "priority": "medium",
            "assigned_to": "Shahid S",
            "current_status": "in progress"
        }
    },
    "G Copilot Case Study": {
        "Task-1": {
            "description": "Draft the initial case study document for G Copilot.",
            "priority": "medium",
            "assigned_to": "Shahid S",
            "current_status": "pending"
        }
    },
    "Internal LLM Comparison Tool": {
        "Task-1": {
            "description": "Define metrics and process for comparing LLMs using business problem statements.",
            "priority": "medium",
            "assigned_to": "Shahid S",
            "current_status": "in progress"
        }
    },
    "Task Manager and Plugins": {
        "Task-1": {
            "description": "Develop a task manager prototype for better project management and communication.",
            "priority": "high",
            "assigned_to": "Shahid S",
            "current_status": "pending"
        }
    }
}
data_ingestion/.DS_Store
ADDED
Binary file (6.15 kB). View file
data_ingestion/__init__.py
ADDED
File without changes
data_ingestion/ingest_data.py
ADDED
@@ -0,0 +1,14 @@
# data_ingestion/ingest_data.py
from docx import Document

def read_document(file_path):
    """Reads a Word document and extracts text content from each line."""
    document = Document(file_path)
    text_data = []

    for para in document.paragraphs:
        line = para.text.strip()
        if line:  # Only add non-empty lines
            text_data.append(line)

    return text_data
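A quick, self-contained way to exercise read_document; this sketch assumes python-docx is installed and the script is run from the repo root, and the file name sample.docx is arbitrary:

# Illustrative only: build a small .docx in place, then read it back.
from docx import Document
from data_ingestion.ingest_data import read_document

doc = Document()
doc.add_paragraph("Shahid S: Hi! Vick! Hello!")
doc.add_paragraph("")                      # empty paragraph, should be skipped
doc.add_paragraph("Vivek: Hey, Shahid?")
doc.save("sample.docx")

print(read_document("sample.docx"))
# Expected: ['Shahid S: Hi! Vick! Hello!', 'Vivek: Hey, Shahid?']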
data_ingestion/preprocess_data.py
ADDED
@@ -0,0 +1,15 @@
# data_ingestion/preprocess_data.py
import re

def preprocess_text(data):
    """Cleans extracted text data to retain only the author and their message."""
    cleaned_data = []

    for line in data:
        # Match pattern with author and text (e.g., "Author: Message")
        match = re.match(r"^(.*?):\s+(.*)$", line)
        if match:
            author, text = match.groups()
            cleaned_data.append({"author": author.strip(), "text": text.strip()})

    return cleaned_data
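For reference, a short sketch of how preprocess_text behaves on the kind of lines read_document returns; the sample lines are invented, and the script is assumed to run from the repo root:

from data_ingestion.preprocess_data import preprocess_text

lines = [
    "Shahid S: Hi! Vick! Hello!",
    "no speaker marker here",   # dropped: no "Author: Message" pattern
    "Vivek: Hey, Shahid?",
]
print(preprocess_text(lines))
# [{'author': 'Shahid S', 'text': 'Hi! Vick! Hello!'},
#  {'author': 'Vivek', 'text': 'Hey, Shahid?'}]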
data_ingestion/sample_data/Day-1 Transcript.docx
ADDED
Binary file (39.6 kB). View file
data_ingestion/sample_data/Day-2 Transcript.docx
ADDED
Binary file (27.9 kB). View file
database/__init__.py
ADDED
File without changes
database/models.py
ADDED
File without changes
database/mongo_integration.py
ADDED
@@ -0,0 +1,58 @@
from database.setup_db import get_database
from datetime import datetime
import uuid
from pymongo import DESCENDING

def get_current_week_identifier():
    """
    Generate a week identifier based on the current year and week number.
    Example: '2023_Week_45' for the 45th week of 2023.
    """
    now = datetime.now()
    year = now.year
    week_number = now.isocalendar()[1]  # ISO week number
    return f"{year}_Week_{week_number}"

def insert_weekly_task_data(json_data):
    """
    Insert JSON data into the weekly_tasks collection.

    Args:
        json_data (dict): JSON object containing task data.
    """
    db = get_database()
    collection = db["weekly_tasks"]
    # Generate the current week identifier and a unique ID for this snapshot
    week_identifier = get_current_week_identifier()
    unique_id = str(uuid.uuid4())
    # Check if a document with this unique ID already exists
    existing_document = collection.find_one({"unique_id": unique_id})
    if existing_document:
        print(f"Document with id: {unique_id} already exists. Skipping insert.")
        return

    # Insert the document if it doesn't already exist
    document = {
        "week": week_identifier,
        "unique_id": unique_id,
        "tasks": json_data,
        "created_at": datetime.now()
    }
    result = collection.insert_one(document)
    print(f"Inserted document with ID: {result.inserted_id}")


def fetch_recent_two_entries():
    """
    Fetch the two most recent entries from the weekly_tasks collection
    based on the created_at timestamp.

    Returns:
        list: A list of the two most recent documents from the collection.
    """
    db = get_database()
    collection = db["weekly_tasks"]

    # Query to fetch the two most recent entries
    recent_entries = list(
        collection.find().sort("created_at", DESCENDING).limit(2)
    )
    return recent_entries
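A hedged usage sketch for the two helpers above; it assumes the Atlas cluster configured in database/setup_db.py is reachable, and the task payload is a made-up example:

from database.mongo_integration import insert_weekly_task_data, fetch_recent_two_entries

# Insert one weekly snapshot (hypothetical payload)
insert_weekly_task_data({
    "Demo Project": {
        "Task-1": {"description": "Draft roadmap", "priority": "high",
                   "assigned_to": "Shahid S", "current_status": "pending"}
    }
})

# Read the two newest snapshots back, newest first
for entry in fetch_recent_two_entries():
    print(entry["week"], entry["created_at"])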
database/setup_db.py
ADDED
@@ -0,0 +1,14 @@
# database/setup_db.py
from pymongo import MongoClient

def get_mongo_client():
    """Connect to the MongoDB Atlas cluster."""
    connection_string = "mongodb+srv://shahid:Protondev%[email protected]/"
    client = MongoClient(connection_string)
    return client

def get_database():
    """Connect to the task_management database."""
    client = get_mongo_client()
    db = client["task_management"]
    return db
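Connectivity can be smoke-tested in a couple of lines (again assuming network access to the cluster):

from database.setup_db import get_database

db = get_database()
print(db.list_collection_names())  # e.g. ['weekly_tasks', 'employee_project'] once data exists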
llm_integration/__init__.py
ADDED
File without changes
llm_integration/task_comparison.py
ADDED
@@ -0,0 +1,115 @@
1 |
+
# llm_integration/task_extraction.py
|
2 |
+
from openai import OpenAI
|
3 |
+
import re
|
4 |
+
import json
|
5 |
+
|
6 |
+
def extract_json_from_raw_response(raw_response):
|
7 |
+
"""
|
8 |
+
Extract the JSON part from the raw response string.
|
9 |
+
|
10 |
+
Args:
|
11 |
        raw_response (str): The raw response from the LLM containing JSON and additional text.

    Returns:
        dict: Parsed JSON object.
    """
    # Use regex to extract the JSON block between ```json and ```
    match = re.search(r"```json(.*?)```", raw_response, re.DOTALL)
    if match:
        json_string = match.group(1).strip()  # Extract the matched JSON part
        try:
            json_data = json.loads(json_string)  # Parse the JSON string into a Python dictionary
            return json_data
        except json.JSONDecodeError as e:
            print(f"Error decoding JSON: {e}")
            return None
    else:
        print("No valid JSON block found in the response.")
        return None


def compare_task_data(old_task_data, new_task_data):
    """
    Send old and new task data to the LLM for comparison.

    Args:
        old_task_data (dict): JSON data for the older tasks.
        new_task_data (dict): JSON data for the newer tasks.

    Returns:
        dict: Consolidated JSON with updates and new tasks.
    """
    # Prepare the prompt
    prompt = f"""
    Given the following two sets of task JSON data, compare them and:
    1. Identify projects and tasks present in the second JSON but not in the first.
       - If two projects have different names but are contextually similar (e.g., due to spelling differences or tasks), treat them as the same project and merge their tasks.

    2. For tasks that exist in both JSONs within the same project:
       - Compare the following fields:
         - "description"
         - "priority"
         - "assigned_to"
         - "current_status"
       - If any changes are detected in these fields, update the task details in the output.

    3. If a project or task in the second JSON contains new tasks or subtasks not present in the first JSON:
       - Add those tasks or subtasks to the corresponding project in the output.

    4. Ensure the final JSON structure meets the following conditions:
       - Each project appears only once in the JSON.
       - All tasks are uniquely represented under their respective projects.
       - Updates to tasks (e.g., changes in "priority", "assigned_to", or "current_status") are applied.
       - Tasks or subtasks are not duplicated across the output.

    FIRST TASK DATA:
    '''
    {old_task_data}
    '''
    SECOND TASK DATA:
    '''
    {new_task_data}
    '''
    Expected Output:
    A single consolidated JSON structure where:
    - Projects are uniquely represented and merged based on contextual similarity.
    - Each project contains all relevant tasks, including updates and newly added ones.
    - All tasks follow this structure:

    Return a single consolidated JSON structure with:
    {{
        "project_name_1": {{
            "Task-1": {{
                "description": "Brief description of the task",
                "priority": "high/medium/low",
                "assigned_to": "Person responsible",
                "current_status": "Status of the task (e.g., completed, in progress, pending)"
            }},
            "Task-2": {{
                "description": "Brief description of the task",
                "priority": "high/medium/low",
                "assigned_to": "Person responsible",
                "current_status": "Status of the task (e.g., completed, in progress, pending)"
            }}
        }},
        "project_name_2": {{
            "Task-1": {{
                "description": "Brief description of the task",
                "priority": "high/medium/low",
                "assigned_to": "Person responsible",
                "current_status": "Status of the task (e.g., completed, in progress, pending)"
            }}
        }}
    }}
    """

    client = OpenAI(api_key='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')

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        # stream=True,
    )
    raw_response = stream.choices[0].message.content
    final_response = extract_json_from_raw_response(raw_response)
    return final_response
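A minimal usage sketch for compare_task_data, assuming the hardcoded OpenAI key above is valid; the two dicts are hypothetical week-over-week snapshots invented for illustration, not data from this repo:

# Hypothetical example (not part of the module): consolidate two weekly snapshots.
old_tasks = {"RAG Article": {"Task-1": {"description": "Add content to the RAG article", "priority": "high", "assigned_to": "Shahid S", "current_status": "in progress"}}}
new_tasks = {"RAG Article": {"Task-1": {"description": "Add content to the RAG article", "priority": "high", "assigned_to": "Shahid S", "current_status": "completed"}}}
merged = compare_task_data(old_tasks, new_tasks)  # dict on success, None if the reply lacks a ```json block
print(merged)  # Task-1 should come back with current_status "completed"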
llm_integration/task_extraction.py
ADDED
@@ -0,0 +1,82 @@
# llm_integration/task_extraction.py
from openai import OpenAI
import re
import json

def extract_json_from_raw_response(raw_response):
    """
    Extract the JSON part from the raw response string.

    Args:
        raw_response (str): The raw response from the LLM containing JSON and additional text.

    Returns:
        dict: Parsed JSON object.
    """
    # Use regex to extract the JSON block between ```json and ```
    match = re.search(r"```json(.*?)```", raw_response, re.DOTALL)
    if match:
        json_string = match.group(1).strip()  # Extract the matched JSON part
        try:
            json_data = json.loads(json_string)  # Parse the JSON string into a Python dictionary
            return json_data
        except json.JSONDecodeError as e:
            print(f"Error decoding JSON: {e}")
            return None
    else:
        print("No valid JSON block found in the response.")
        return None


def extract_tasks_from_text(conversation_text):
    """Send conversation text to the LLM and extract tasks in JSON format."""
    # Define the prompt
    prompt = f"""
    Extract detailed project information from the following text and structure it in JSON format.
    The JSON should have each project as a main key, with tasks as subkeys. For each task, include
    the following fields: "description", "priority", "assigned_to", and "current_status".
    Use the conversation details to populate the values accurately.

    Text:
    '''
    {conversation_text}
    '''

    Expected JSON Output:
    {{
        "project_name_1": {{
            "Task-1": {{
                "description": "Brief description of the task",
                "priority": "high/medium/low",
                "assigned_to": "Person responsible",
                "current_status": "Status of the task (e.g., completed, in progress, pending)"
            }},
            "Task-2": {{
                "description": "Brief description of the task",
                "priority": "high/medium/low",
                "assigned_to": "Person responsible",
                "current_status": "Status of the task (e.g., completed, in progress, pending)"
            }}
        }},
        "project_name_2": {{
            "Task-1": {{
                "description": "Brief description of the task",
                "priority": "high/medium/low",
                "assigned_to": "Person responsible",
                "current_status": "Status of the task (e.g., completed, in progress, pending)"
            }}
        }}
    }}
    """

    client = OpenAI(api_key='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')

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        # stream=True,
    )
    raw_response = stream.choices[0].message.content
    final_response = extract_json_from_raw_response(raw_response)
    return final_response
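A minimal sketch of the parsing step, assuming the LLM wraps its answer in a ```json fenced block as the prompt requests; the response string below is fabricated for illustration:

# Hypothetical raw LLM response; extract_json_from_raw_response strips the ```json fences and parses the payload.
sample_response = 'Here are the extracted tasks:\n```json\n{"Demo Project": {"Task-1": {"description": "x", "priority": "low", "assigned_to": "A", "current_status": "pending"}}}\n```'
parsed = extract_json_from_raw_response(sample_response)
assert parsed["Demo Project"]["Task-1"]["current_status"] == "pending"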
poetry.lock
ADDED
The diff for this file is too large to render.
pyproject.toml
ADDED
@@ -0,0 +1,20 @@
[tool.poetry]
name = "project management"
version = "0.1.0"
description = ""
authors = ["Your Name <[email protected]>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.12"
streamlit = "^1.39.0"
pymongo = "^4.10.1"
python-docx = "^1.1.2"
openai = "^1.54.4"
streamlit-aggrid = "^1.0.5"
notion-client = "^2.2.1"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
requirements.txt
ADDED
File without changes
test.ipynb
ADDED
@@ -0,0 +1,1303 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 18,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"Defaulting to user installation because normal site-packages is not writeable\n",
|
13 |
+
"Requirement already satisfied: openai in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (1.54.3)\n",
|
14 |
+
"Requirement already satisfied: jiter<1,>=0.4.0 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (0.7.0)\n",
|
15 |
+
"Requirement already satisfied: httpx<1,>=0.23.0 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (0.27.2)\n",
|
16 |
+
"Requirement already satisfied: anyio<5,>=3.5.0 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (4.6.2.post1)\n",
|
17 |
+
"Requirement already satisfied: tqdm>4 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (4.67.0)\n",
|
18 |
+
"Requirement already satisfied: typing-extensions<5,>=4.11 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (4.12.2)\n",
|
19 |
+
"Requirement already satisfied: sniffio in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (1.3.1)\n",
|
20 |
+
"Collecting pydantic<3,>=1.9.0\n",
|
21 |
+
" Downloading pydantic-2.10.2-py3-none-any.whl (456 kB)\n",
|
22 |
+
"\u001b[K |████████████████████████████████| 456 kB 1.6 MB/s eta 0:00:01\n",
|
23 |
+
"\u001b[?25hRequirement already satisfied: distro<2,>=1.7.0 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from openai) (1.9.0)\n",
|
24 |
+
"Requirement already satisfied: exceptiongroup>=1.0.2 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from anyio<5,>=3.5.0->openai) (1.2.2)\n",
|
25 |
+
"Requirement already satisfied: idna>=2.8 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from anyio<5,>=3.5.0->openai) (3.10)\n",
|
26 |
+
"Requirement already satisfied: certifi in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx<1,>=0.23.0->openai) (2024.8.30)\n",
|
27 |
+
"Requirement already satisfied: httpcore==1.* in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx<1,>=0.23.0->openai) (1.0.7)\n",
|
28 |
+
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.14.0)\n",
|
29 |
+
"Requirement already satisfied: annotated-types>=0.6.0 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from pydantic<3,>=1.9.0->openai) (0.7.0)\n",
|
30 |
+
"Collecting pydantic-core==2.27.1\n",
|
31 |
+
" Downloading pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl (1.8 MB)\n",
|
32 |
+
"\u001b[K |████████████████████████████████| 1.8 MB 4.3 MB/s eta 0:00:01\n",
|
33 |
+
"\u001b[?25hInstalling collected packages: pydantic-core, pydantic\n",
|
34 |
+
" Attempting uninstall: pydantic-core\n",
|
35 |
+
" Found existing installation: pydantic-core 2.23.4\n",
|
36 |
+
" Uninstalling pydantic-core-2.23.4:\n",
|
37 |
+
" Successfully uninstalled pydantic-core-2.23.4\n",
|
38 |
+
" Attempting uninstall: pydantic\n",
|
39 |
+
" Found existing installation: pydantic 1.6.2\n",
|
40 |
+
" Uninstalling pydantic-1.6.2:\n",
|
41 |
+
" Successfully uninstalled pydantic-1.6.2\n",
|
42 |
+
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
|
43 |
+
"tea-client 0.0.7 requires httpx[http2]~=0.14.2, but you have httpx 0.27.2 which is incompatible.\n",
|
44 |
+
"tea-client 0.0.7 requires pydantic~=1.6.1, but you have pydantic 2.10.2 which is incompatible.\u001b[0m\n",
|
45 |
+
"Successfully installed pydantic-2.10.2 pydantic-core-2.27.1\n",
|
46 |
+
"\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.3.1 is available.\n",
|
47 |
+
"You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n",
|
48 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
49 |
+
]
|
50 |
+
}
|
51 |
+
],
|
52 |
+
"source": [
|
53 |
+
"%pip install openai\n",
|
54 |
+
"# !pip install typing_extensions==4.7.1 --upgrade"
|
55 |
+
]
|
56 |
+
},
|
57 |
+
{
|
58 |
+
"cell_type": "code",
|
59 |
+
"execution_count": 2,
|
60 |
+
"metadata": {},
|
61 |
+
"outputs": [],
|
62 |
+
"source": [
|
63 |
+
"openai_key='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'"
|
64 |
+
]
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"cell_type": "code",
|
68 |
+
"execution_count": 3,
|
69 |
+
"metadata": {},
|
70 |
+
"outputs": [
|
71 |
+
{
|
72 |
+
"name": "stdout",
|
73 |
+
"output_type": "stream",
|
74 |
+
"text": [
|
75 |
+
"Energy cannot be created or destroyed; it only transforms forms."
|
76 |
+
]
|
77 |
+
}
|
78 |
+
],
|
79 |
+
"source": [
|
80 |
+
"from openai import OpenAI\n",
|
81 |
+
"\n",
|
82 |
+
"client = OpenAI(api_key=openai_key)\n",
|
83 |
+
"\n",
|
84 |
+
"stream = client.chat.completions.create(\n",
|
85 |
+
" model=\"gpt-4o-mini\",\n",
|
86 |
+
" messages=[{\"role\": \"user\", \"content\": \"Say this is a test, and explain law of thermo dynamics in 10 words\"}],\n",
|
87 |
+
" stream=True,\n",
|
88 |
+
")\n",
|
89 |
+
"for chunk in stream:\n",
|
90 |
+
" if chunk.choices[0].delta.content is not None:\n",
|
91 |
+
" print(chunk.choices[0].delta.content, end=\"\")"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"cell_type": "code",
|
96 |
+
"execution_count": 12,
|
97 |
+
"metadata": {},
|
98 |
+
"outputs": [],
|
99 |
+
"source": [
|
100 |
+
"with open('cleaned_output.txt', 'r') as file:\n",
|
101 |
+
" content = file.read()\n"
|
102 |
+
]
|
103 |
+
},
|
104 |
+
{
|
105 |
+
"cell_type": "code",
|
106 |
+
"execution_count": 13,
|
107 |
+
"metadata": {},
|
108 |
+
"outputs": [],
|
109 |
+
"source": [
|
110 |
+
"prompt= \"\"\"\n",
|
111 |
+
"Extract detailed project information from the following text and structure it in JSON format. The JSON should have each project as a main key, with tasks as subkeys. For each task, include the following fields: \"description\", \"priority\", \"assigned_to\", and \"current_status\". Use the conversation details to populate the values accurately. \n",
|
112 |
+
"\n",
|
113 |
+
"Text:\n",
|
114 |
+
"'''\n",
|
115 |
+
"{content}\n",
|
116 |
+
"'''\n",
|
117 |
+
"\n",
|
118 |
+
"Expected JSON Output:\n",
|
119 |
+
"{{\n",
|
120 |
+
" \"project_name_1\": {{\n",
|
121 |
+
" \"Task-1\": {{\n",
|
122 |
+
" \"description\": \"Brief description of the task\",\n",
|
123 |
+
" \"priority\": \"high/medium/low\",\n",
|
124 |
+
" \"assigned_to\": \"Person responsible\",\n",
|
125 |
+
" \"current_status\": \"Status of the task (e.g., completed, in progress, pending)\"\n",
|
126 |
+
" }},\n",
|
127 |
+
" \"Task-2\": {{\n",
|
128 |
+
" \"description\": \"Brief description of the task\",\n",
|
129 |
+
" \"priority\": \"high/medium/low\",\n",
|
130 |
+
" \"assigned_to\": \"Person responsible\",\n",
|
131 |
+
" \"current_status\": \"Status of the task (e.g., completed, in progress, pending)\"\n",
|
132 |
+
" }}\n",
|
133 |
+
" }},\n",
|
134 |
+
" \"project_name_2\": {{\n",
|
135 |
+
" \"Task-1\": {{\n",
|
136 |
+
" \"description\": \"Brief description of the task\",\n",
|
137 |
+
" \"priority\": \"high/medium/low\",\n",
|
138 |
+
" \"assigned_to\": \"Person responsible\",\n",
|
139 |
+
" \"current_status\": \"Status of the task (e.g., completed, in progress, pending)\"\n",
|
140 |
+
" }}\n",
|
141 |
+
" }}\n",
|
142 |
+
"}}\n",
|
143 |
+
"\n",
|
144 |
+
"Follow this structure and ensure each project's tasks are accurately represented with the appropriate fields. Keep the output concise and relevant to the project information discussed in the text.\n",
|
145 |
+
"\"\"\""
|
146 |
+
]
|
147 |
+
},
|
148 |
+
{
|
149 |
+
"cell_type": "code",
|
150 |
+
"execution_count": 14,
|
151 |
+
"metadata": {},
|
152 |
+
"outputs": [],
|
153 |
+
"source": [
|
154 |
+
"final_prompt= prompt.format(content=content)"
|
155 |
+
]
|
156 |
+
},
|
157 |
+
{
|
158 |
+
"cell_type": "code",
|
159 |
+
"execution_count": 15,
|
160 |
+
"metadata": {},
|
161 |
+
"outputs": [
|
162 |
+
{
|
163 |
+
"name": "stdout",
|
164 |
+
"output_type": "stream",
|
165 |
+
"text": [
|
166 |
+
"```json\n",
|
167 |
+
"{\n",
|
168 |
+
" \"Bonnie Plant Project\": {\n",
|
169 |
+
" \"Task-1\": {\n",
|
170 |
+
" \"description\": \"Coordinate with Nikate to send out email to Bonnie plans\",\n",
|
171 |
+
" \"priority\": \"medium\",\n",
|
172 |
+
" \"assigned_to\": \"Nikate\",\n",
|
173 |
+
" \"current_status\": \"completed\"\n",
|
174 |
+
" }\n",
|
175 |
+
" },\n",
|
176 |
+
" \"RAG Article and Blog Project\": {\n",
|
177 |
+
" \"Task-1\": {\n",
|
178 |
+
" \"description\": \"Add content to the RAG article and seek feedback\",\n",
|
179 |
+
" \"priority\": \"high\",\n",
|
180 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
181 |
+
" \"current_status\": \"in progress\"\n",
|
182 |
+
" },\n",
|
183 |
+
" \"Task-2\": {\n",
|
184 |
+
" \"description\": \"Review RAG documentation and provide feedback\",\n",
|
185 |
+
" \"priority\": \"high\",\n",
|
186 |
+
" \"assigned_to\": \"Vivek\",\n",
|
187 |
+
" \"current_status\": \"pending\"\n",
|
188 |
+
" }\n",
|
189 |
+
" },\n",
|
190 |
+
" \"G Copilot Case Study\": {\n",
|
191 |
+
" \"Task-1\": {\n",
|
192 |
+
" \"description\": \"Develop a case study outline for G Copilot\",\n",
|
193 |
+
" \"priority\": \"medium\",\n",
|
194 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
195 |
+
" \"current_status\": \"in progress\"\n",
|
196 |
+
" },\n",
|
197 |
+
" \"Task-2\": {\n",
|
198 |
+
" \"description\": \"Provide access credentials for G Copilot team\",\n",
|
199 |
+
" \"priority\": \"medium\",\n",
|
200 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
201 |
+
" \"current_status\": \"in progress\"\n",
|
202 |
+
" }\n",
|
203 |
+
" },\n",
|
204 |
+
" \"Washington Government Project\": {\n",
|
205 |
+
" \"Task-1\": {\n",
|
206 |
+
" \"description\": \"Request summary from the team for submission\",\n",
|
207 |
+
" \"priority\": \"high\",\n",
|
208 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
209 |
+
" \"current_status\": \"pending\"\n",
|
210 |
+
" }\n",
|
211 |
+
" },\n",
|
212 |
+
" \"Internal Tool Development\": {\n",
|
213 |
+
" \"Task-1\": {\n",
|
214 |
+
" \"description\": \"Organize thoughts and provide a high-level presentation on internal tool ideas\",\n",
|
215 |
+
" \"priority\": \"medium\",\n",
|
216 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
217 |
+
" \"current_status\": \"pending\"\n",
|
218 |
+
" }\n",
|
219 |
+
" },\n",
|
220 |
+
" \"Website Innovation\": {\n",
|
221 |
+
" \"Task-1\": {\n",
|
222 |
+
" \"description\": \"Complete website development with integrated features\",\n",
|
223 |
+
" \"priority\": \"medium\",\n",
|
224 |
+
" \"assigned_to\": \"Jaspreet\",\n",
|
225 |
+
" \"current_status\": \"in progress\"\n",
|
226 |
+
" }\n",
|
227 |
+
" }\n",
|
228 |
+
"}\n",
|
229 |
+
"```"
|
230 |
+
]
|
231 |
+
}
|
232 |
+
],
|
233 |
+
"source": [
|
234 |
+
"stream = client.chat.completions.create(\n",
|
235 |
+
" model=\"gpt-4o\",\n",
|
236 |
+
" messages=[{\"role\": \"user\", \"content\": final_prompt}],\n",
|
237 |
+
" stream=True,\n",
|
238 |
+
")\n",
|
239 |
+
"for chunk in stream:\n",
|
240 |
+
" if chunk.choices[0].delta.content is not None:\n",
|
241 |
+
" print(chunk.choices[0].delta.content, end=\"\")"
|
242 |
+
]
|
243 |
+
},
|
244 |
+
{
|
245 |
+
"cell_type": "code",
|
246 |
+
"execution_count": 1,
|
247 |
+
"metadata": {},
|
248 |
+
"outputs": [],
|
249 |
+
"source": [
|
250 |
+
"## PHase-3"
|
251 |
+
]
|
252 |
+
},
|
253 |
+
{
|
254 |
+
"cell_type": "code",
|
255 |
+
"execution_count": 3,
|
256 |
+
"metadata": {},
|
257 |
+
"outputs": [
|
258 |
+
{
|
259 |
+
"name": "stdout",
|
260 |
+
"output_type": "stream",
|
261 |
+
"text": [
|
262 |
+
"Defaulting to user installation because normal site-packages is not writeable\n",
|
263 |
+
"Collecting pymongo\n",
|
264 |
+
" Downloading pymongo-4.10.1-cp39-cp39-macosx_11_0_arm64.whl (781 kB)\n",
|
265 |
+
"\u001b[K |████████████████████████████████| 781 kB 1.1 MB/s eta 0:00:01\n",
|
266 |
+
"\u001b[?25hCollecting dnspython<3.0.0,>=1.16.0\n",
|
267 |
+
" Downloading dnspython-2.7.0-py3-none-any.whl (313 kB)\n",
|
268 |
+
"\u001b[K |████████████████████████████████| 313 kB 3.4 MB/s eta 0:00:01\n",
|
269 |
+
"\u001b[?25hInstalling collected packages: dnspython, pymongo\n",
|
270 |
+
"Successfully installed dnspython-2.7.0 pymongo-4.10.1\n",
|
271 |
+
"\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.3.1 is available.\n",
|
272 |
+
"You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n",
|
273 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
274 |
+
]
|
275 |
+
}
|
276 |
+
],
|
277 |
+
"source": [
|
278 |
+
"%pip install pymongo"
|
279 |
+
]
|
280 |
+
},
|
281 |
+
{
|
282 |
+
"cell_type": "code",
|
283 |
+
"execution_count": 6,
|
284 |
+
"metadata": {},
|
285 |
+
"outputs": [],
|
286 |
+
"source": [
|
287 |
+
"from pymongo import MongoClient\n",
|
288 |
+
"\n",
|
289 |
+
"def get_mongo_client():\n",
|
290 |
+
" \"\"\"Connect to the MongoDB Atlas cluster.\"\"\"\n",
|
291 |
+
" connection_string = \"mongodb+srv://shahid:Protondev%[email protected]/\"\n",
|
292 |
+
" client = MongoClient(connection_string)\n",
|
293 |
+
" return client\n",
|
294 |
+
"\n",
|
295 |
+
"def get_database():\n",
|
296 |
+
" \"\"\"Connect to the task_management database.\"\"\"\n",
|
297 |
+
" client = get_mongo_client()\n",
|
298 |
+
" db = client[\"task_management\"]\n",
|
299 |
+
" return db\n"
|
300 |
+
]
|
301 |
+
},
|
302 |
+
{
|
303 |
+
"cell_type": "code",
|
304 |
+
"execution_count": 7,
|
305 |
+
"metadata": {},
|
306 |
+
"outputs": [],
|
307 |
+
"source": [
|
308 |
+
"def test_connection():\n",
|
309 |
+
" db = get_database()\n",
|
310 |
+
" print(\"Connected to MongoDB:\", db.list_collection_names())"
|
311 |
+
]
|
312 |
+
},
|
313 |
+
{
|
314 |
+
"cell_type": "code",
|
315 |
+
"execution_count": 8,
|
316 |
+
"metadata": {},
|
317 |
+
"outputs": [
|
318 |
+
{
|
319 |
+
"name": "stdout",
|
320 |
+
"output_type": "stream",
|
321 |
+
"text": [
|
322 |
+
"Connected to MongoDB: ['weekly_tasks']\n"
|
323 |
+
]
|
324 |
+
}
|
325 |
+
],
|
326 |
+
"source": [
|
327 |
+
"test_connection()"
|
328 |
+
]
|
329 |
+
},
|
330 |
+
{
|
331 |
+
"cell_type": "code",
|
332 |
+
"execution_count": 9,
|
333 |
+
"metadata": {},
|
334 |
+
"outputs": [],
|
335 |
+
"source": [
|
336 |
+
"def insert_weekly_task_data(json_data, week_identifier):\n",
|
337 |
+
" \"\"\"\n",
|
338 |
+
" Insert JSON data into the weekly_tasks collection.\n",
|
339 |
+
"\n",
|
340 |
+
" Args:\n",
|
341 |
+
" json_data (dict): JSON object containing task data.\n",
|
342 |
+
" week_identifier (str): A unique identifier for the week (e.g., \"Week_1\").\n",
|
343 |
+
" \"\"\"\n",
|
344 |
+
" db = get_database()\n",
|
345 |
+
" collection = db[\"weekly_tasks\"]\n",
|
346 |
+
" \n",
|
347 |
+
" # Check if a document for the given week already exists\n",
|
348 |
+
" existing_document = collection.find_one({\"week\": week_identifier})\n",
|
349 |
+
" if existing_document:\n",
|
350 |
+
" print(f\"Document for {week_identifier} already exists. Skipping insert.\")\n",
|
351 |
+
" return\n",
|
352 |
+
"\n",
|
353 |
+
" # Insert the document if it doesn't already exist\n",
|
354 |
+
" document = {\n",
|
355 |
+
" \"week\": week_identifier,\n",
|
356 |
+
" \"tasks\": json_data\n",
|
357 |
+
" }\n",
|
358 |
+
" result = collection.insert_one(document)\n",
|
359 |
+
" print(f\"Inserted document with ID: {result.inserted_id}\")"
|
360 |
+
]
|
361 |
+
},
|
362 |
+
{
|
363 |
+
"cell_type": "code",
|
364 |
+
"execution_count": 17,
|
365 |
+
"metadata": {},
|
366 |
+
"outputs": [],
|
367 |
+
"source": [
|
368 |
+
"json_data= '''{\n",
|
369 |
+
" \"Bonnie Plant Project\": {\n",
|
370 |
+
" \"Task-1\": {\n",
|
371 |
+
" \"description\": \"Coordinate with Nikate to send out email to Bonnie plans\",\n",
|
372 |
+
" \"priority\": \"medium\",\n",
|
373 |
+
" \"assigned_to\": \"Nikate\",\n",
|
374 |
+
" \"current_status\": \"completed\"\n",
|
375 |
+
" }\n",
|
376 |
+
" },\n",
|
377 |
+
" \"RAG Article and Blog Project\": {\n",
|
378 |
+
" \"Task-1\": {\n",
|
379 |
+
" \"description\": \"Add content to the RAG article and seek feedback\",\n",
|
380 |
+
" \"priority\": \"high\",\n",
|
381 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
382 |
+
" \"current_status\": \"in progress\"\n",
|
383 |
+
" },\n",
|
384 |
+
" \"Task-2\": {\n",
|
385 |
+
" \"description\": \"Review RAG documentation and provide feedback\",\n",
|
386 |
+
" \"priority\": \"high\",\n",
|
387 |
+
" \"assigned_to\": \"Vivek\",\n",
|
388 |
+
" \"current_status\": \"pending\"\n",
|
389 |
+
" }\n",
|
390 |
+
" },\n",
|
391 |
+
" \"G Copilot Case Study\": {\n",
|
392 |
+
" \"Task-1\": {\n",
|
393 |
+
" \"description\": \"Develop a case study outline for G Copilot\",\n",
|
394 |
+
" \"priority\": \"medium\",\n",
|
395 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
396 |
+
" \"current_status\": \"in progress\"\n",
|
397 |
+
" },\n",
|
398 |
+
" \"Task-2\": {\n",
|
399 |
+
" \"description\": \"Provide access credentials for G Copilot team\",\n",
|
400 |
+
" \"priority\": \"medium\",\n",
|
401 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
402 |
+
" \"current_status\": \"in progress\"\n",
|
403 |
+
" }\n",
|
404 |
+
" },\n",
|
405 |
+
" \"Washington Government Project\": {\n",
|
406 |
+
" \"Task-1\": {\n",
|
407 |
+
" \"description\": \"Request summary from the team for submission\",\n",
|
408 |
+
" \"priority\": \"high\",\n",
|
409 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
410 |
+
" \"current_status\": \"pending\"\n",
|
411 |
+
" }\n",
|
412 |
+
" },\n",
|
413 |
+
" \"Internal Tool Development\": {\n",
|
414 |
+
" \"Task-1\": {\n",
|
415 |
+
" \"description\": \"Organize thoughts and provide a high-level presentation on internal tool ideas\",\n",
|
416 |
+
" \"priority\": \"medium\",\n",
|
417 |
+
" \"assigned_to\": \"Shahid S\",\n",
|
418 |
+
" \"current_status\": \"pending\"\n",
|
419 |
+
" }\n",
|
420 |
+
" },\n",
|
421 |
+
" \"Website Innovation\": {\n",
|
422 |
+
" \"Task-1\": {\n",
|
423 |
+
" \"description\": \"Complete website development with integrated features\",\n",
|
424 |
+
" \"priority\": \"medium\",\n",
|
425 |
+
" \"assigned_to\": \"Jaspreet\",\n",
|
426 |
+
" \"current_status\": \"in progress\"\n",
|
427 |
+
" }\n",
|
428 |
+
" }\n",
|
429 |
+
"}'''"
|
430 |
+
]
|
431 |
+
},
|
432 |
+
{
|
433 |
+
"cell_type": "code",
|
434 |
+
"execution_count": 18,
|
435 |
+
"metadata": {},
|
436 |
+
"outputs": [
|
437 |
+
{
|
438 |
+
"name": "stdout",
|
439 |
+
"output_type": "stream",
|
440 |
+
"text": [
|
441 |
+
"Inserted document with ID: 673a4885353ff570ac1d3779\n"
|
442 |
+
]
|
443 |
+
}
|
444 |
+
],
|
445 |
+
"source": [
|
446 |
+
"week_identifier= \"Week_1\"\n",
|
447 |
+
"insert_weekly_task_data(json_data, week_identifier)"
|
448 |
+
]
|
449 |
+
},
|
450 |
+
{
|
451 |
+
"cell_type": "code",
|
452 |
+
"execution_count": 19,
|
453 |
+
"metadata": {},
|
454 |
+
"outputs": [
|
455 |
+
{
|
456 |
+
"name": "stdout",
|
457 |
+
"output_type": "stream",
|
458 |
+
"text": [
|
459 |
+
"Defaulting to user installation because normal site-packages is not writeable\n",
|
460 |
+
"Collecting paperswithcode-client\n",
|
461 |
+
" Downloading paperswithcode_client-0.3.1-py3-none-any.whl (24 kB)\n",
|
462 |
+
"Collecting tea-console==0.0.6\n",
|
463 |
+
" Downloading tea_console-0.0.6-py3-none-any.whl (12 kB)\n",
|
464 |
+
"Collecting typer==0.3.2\n",
|
465 |
+
" Downloading typer-0.3.2-py3-none-any.whl (21 kB)\n",
|
466 |
+
"Collecting tea-client==0.0.7\n",
|
467 |
+
" Downloading tea_client-0.0.7-py3-none-any.whl (11 kB)\n",
|
468 |
+
"Collecting tea~=0.1.2\n",
|
469 |
+
" Downloading tea-0.1.7-py3-none-any.whl (41 kB)\n",
|
470 |
+
"\u001b[K |████████████████████████████████| 41 kB 1.0 MB/s eta 0:00:01\n",
|
471 |
+
"\u001b[?25hCollecting pydantic~=1.6.1\n",
|
472 |
+
" Downloading pydantic-1.6.2-py36.py37.py38-none-any.whl (99 kB)\n",
|
473 |
+
"\u001b[K |████████████████████████████████| 99 kB 2.1 MB/s eta 0:00:011\n",
|
474 |
+
"\u001b[?25hCollecting httpx[http2]~=0.14.2\n",
|
475 |
+
" Downloading httpx-0.14.3-py3-none-any.whl (62 kB)\n",
|
476 |
+
"\u001b[K |████████████████████████████████| 62 kB 2.8 MB/s eta 0:00:01\n",
|
477 |
+
"\u001b[?25hCollecting tzlocal~=2.1\n",
|
478 |
+
" Downloading tzlocal-2.1-py2.py3-none-any.whl (16 kB)\n",
|
479 |
+
"Collecting rich~=9.11.0\n",
|
480 |
+
" Downloading rich-9.11.1-py3-none-any.whl (195 kB)\n",
|
481 |
+
"\u001b[K |████████████████████████████████| 195 kB 7.1 MB/s eta 0:00:01\n",
|
482 |
+
"\u001b[?25hCollecting pytz~=2021.1\n",
|
483 |
+
" Downloading pytz-2021.3-py2.py3-none-any.whl (503 kB)\n",
|
484 |
+
"\u001b[K |████████████████████████████████| 503 kB 4.8 MB/s eta 0:00:01\n",
|
485 |
+
"\u001b[?25hCollecting click<7.2.0,>=7.1.1\n",
|
486 |
+
" Downloading click-7.1.2-py2.py3-none-any.whl (82 kB)\n",
|
487 |
+
"\u001b[K |████████████████████████████████| 82 kB 2.0 MB/s eta 0:00:011\n",
|
488 |
+
"\u001b[?25hCollecting rfc3986[idna2008]<2,>=1.3\n",
|
489 |
+
" Downloading rfc3986-1.5.0-py2.py3-none-any.whl (31 kB)\n",
|
490 |
+
"Collecting chardet==3.*\n",
|
491 |
+
" Downloading chardet-3.0.4-py2.py3-none-any.whl (133 kB)\n",
|
492 |
+
"\u001b[K |████████████████████████████████| 133 kB 4.2 MB/s eta 0:00:01\n",
|
493 |
+
"\u001b[?25hRequirement already satisfied: certifi in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx[http2]~=0.14.2->tea-client==0.0.7->paperswithcode-client) (2024.8.30)\n",
|
494 |
+
"Collecting httpcore==0.10.*\n",
|
495 |
+
" Downloading httpcore-0.10.2-py3-none-any.whl (48 kB)\n",
|
496 |
+
"\u001b[K |████████████████████████████████| 48 kB 2.2 MB/s eta 0:00:011\n",
|
497 |
+
"\u001b[?25hRequirement already satisfied: sniffio in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx[http2]~=0.14.2->tea-client==0.0.7->paperswithcode-client) (1.3.1)\n",
|
498 |
+
"Collecting h2==3.*\n",
|
499 |
+
" Downloading h2-3.2.0-py2.py3-none-any.whl (65 kB)\n",
|
500 |
+
"\u001b[K |████████████████████████████████| 65 kB 2.9 MB/s eta 0:00:011\n",
|
501 |
+
"\u001b[?25hCollecting hyperframe<6,>=5.2.0\n",
|
502 |
+
" Downloading hyperframe-5.2.0-py2.py3-none-any.whl (12 kB)\n",
|
503 |
+
"Collecting hpack<4,>=3.0\n",
|
504 |
+
" Downloading hpack-3.0.0-py2.py3-none-any.whl (38 kB)\n",
|
505 |
+
"Collecting h11<0.10,>=0.8\n",
|
506 |
+
" Downloading h11-0.9.0-py2.py3-none-any.whl (53 kB)\n",
|
507 |
+
"\u001b[K |████████████████████████████████| 53 kB 2.2 MB/s eta 0:00:01\n",
|
508 |
+
"\u001b[?25hRequirement already satisfied: idna in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from rfc3986[idna2008]<2,>=1.3->httpx[http2]~=0.14.2->tea-client==0.0.7->paperswithcode-client) (3.10)\n",
|
509 |
+
"Collecting colorama<0.5.0,>=0.4.0\n",
|
510 |
+
" Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n",
|
511 |
+
"Requirement already satisfied: pygments<3.0.0,>=2.6.0 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from rich~=9.11.0->tea-console==0.0.6->paperswithcode-client) (2.18.0)\n",
|
512 |
+
"Collecting typing-extensions<4.0.0,>=3.7.4\n",
|
513 |
+
" Downloading typing_extensions-3.10.0.2-py3-none-any.whl (26 kB)\n",
|
514 |
+
"Collecting commonmark<0.10.0,>=0.9.0\n",
|
515 |
+
" Downloading commonmark-0.9.1-py2.py3-none-any.whl (51 kB)\n",
|
516 |
+
"\u001b[K |████████████████████████████████| 51 kB 2.2 MB/s eta 0:00:01\n",
|
517 |
+
"\u001b[?25hCollecting tea~=0.1.2\n",
|
518 |
+
" Downloading tea-0.1.6-py3-none-any.whl (41 kB)\n",
|
519 |
+
"\u001b[K |████████████████████████████████| 41 kB 1.2 MB/s eta 0:00:01\n",
|
520 |
+
"\u001b[?25h Downloading tea-0.1.5-py3-none-any.whl (41 kB)\n",
|
521 |
+
"\u001b[K |████████████████████████████████| 41 kB 427 kB/s eta 0:00:01\n",
|
522 |
+
"\u001b[?25h Downloading tea-0.1.4-py3-none-any.whl (41 kB)\n",
|
523 |
+
"\u001b[K |████████████████████████████████| 41 kB 617 kB/s eta 0:00:01\n",
|
524 |
+
"\u001b[?25hCollecting psutil~=5.8.0\n",
|
525 |
+
" Downloading psutil-5.8.0.tar.gz (470 kB)\n",
|
526 |
+
"\u001b[K |████████████████████████████████| 470 kB 12.9 MB/s eta 0:00:01\n",
|
527 |
+
"\u001b[?25hBuilding wheels for collected packages: psutil\n",
|
528 |
+
" Building wheel for psutil (setup.py) ... \u001b[?25ldone\n",
|
529 |
+
"\u001b[?25h Created wheel for psutil: filename=psutil-5.8.0-cp39-cp39-macosx_10_9_universal2.whl size=260525 sha256=f4aecba874d0f5983f0d73f276f033eb7f8a6f4fd7f070b2783d73818fb94eac\n",
|
530 |
+
" Stored in directory: /Users/sk4467/Library/Caches/pip/wheels/ee/66/e6/aecfd75e0bd554fc1b4dd982e9088dbdc79d10c3601cf3d7f3\n",
|
531 |
+
"Successfully built psutil\n",
|
532 |
+
"Installing collected packages: rfc3986, h11, pytz, hyperframe, httpcore, hpack, chardet, tzlocal, typing-extensions, psutil, httpx, h2, commonmark, colorama, click, typer, tea, rich, pydantic, tea-console, tea-client, paperswithcode-client\n",
|
533 |
+
" Attempting uninstall: h11\n",
|
534 |
+
" Found existing installation: h11 0.14.0\n",
|
535 |
+
" Uninstalling h11-0.14.0:\n",
|
536 |
+
" Successfully uninstalled h11-0.14.0\n",
|
537 |
+
" Attempting uninstall: httpcore\n",
|
538 |
+
" Found existing installation: httpcore 1.0.6\n",
|
539 |
+
" Uninstalling httpcore-1.0.6:\n",
|
540 |
+
" Successfully uninstalled httpcore-1.0.6\n",
|
541 |
+
" Attempting uninstall: typing-extensions\n",
|
542 |
+
" Found existing installation: typing-extensions 4.12.2\n",
|
543 |
+
" Uninstalling typing-extensions-4.12.2:\n",
|
544 |
+
" Successfully uninstalled typing-extensions-4.12.2\n",
|
545 |
+
" Attempting uninstall: psutil\n",
|
546 |
+
" Found existing installation: psutil 6.1.0\n",
|
547 |
+
" Uninstalling psutil-6.1.0:\n",
|
548 |
+
" Successfully uninstalled psutil-6.1.0\n",
|
549 |
+
" Attempting uninstall: httpx\n",
|
550 |
+
" Found existing installation: httpx 0.27.2\n",
|
551 |
+
" Uninstalling httpx-0.27.2:\n",
|
552 |
+
" Successfully uninstalled httpx-0.27.2\n",
|
553 |
+
" Attempting uninstall: pydantic\n",
|
554 |
+
" Found existing installation: pydantic 2.9.2\n",
|
555 |
+
" Uninstalling pydantic-2.9.2:\n",
|
556 |
+
" Successfully uninstalled pydantic-2.9.2\n",
|
557 |
+
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
|
558 |
+
"pydantic-core 2.23.4 requires typing-extensions!=4.7.0,>=4.6.0, but you have typing-extensions 3.10.0.2 which is incompatible.\n",
|
559 |
+
"openai 1.54.3 requires httpx<1,>=0.23.0, but you have httpx 0.14.3 which is incompatible.\n",
|
560 |
+
"openai 1.54.3 requires pydantic<3,>=1.9.0, but you have pydantic 1.6.2 which is incompatible.\n",
|
561 |
+
"openai 1.54.3 requires typing-extensions<5,>=4.11, but you have typing-extensions 3.10.0.2 which is incompatible.\n",
|
562 |
+
"anyio 4.6.2.post1 requires typing-extensions>=4.1; python_version < \"3.11\", but you have typing-extensions 3.10.0.2 which is incompatible.\u001b[0m\n",
|
563 |
+
"Successfully installed chardet-3.0.4 click-7.1.2 colorama-0.4.6 commonmark-0.9.1 h11-0.9.0 h2-3.2.0 hpack-3.0.0 httpcore-0.10.2 httpx-0.14.3 hyperframe-5.2.0 paperswithcode-client-0.3.1 psutil-5.8.0 pydantic-1.6.2 pytz-2021.3 rfc3986-1.5.0 rich-9.11.1 tea-0.1.4 tea-client-0.0.7 tea-console-0.0.6 typer-0.3.2 typing-extensions-3.10.0.2 tzlocal-2.1\n",
|
564 |
+
"\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.3.1 is available.\n",
|
565 |
+
"You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n",
|
566 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
567 |
+
]
|
568 |
+
}
|
569 |
+
],
|
570 |
+
"source": [
|
571 |
+
"%pip install paperswithcode-client"
|
572 |
+
]
|
573 |
+
},
|
574 |
+
{
|
575 |
+
"cell_type": "code",
|
576 |
+
"execution_count": 1,
|
577 |
+
"metadata": {},
|
578 |
+
"outputs": [
|
579 |
+
{
|
580 |
+
"name": "stdout",
|
581 |
+
"output_type": "stream",
|
582 |
+
"text": [
|
583 |
+
"Defaulting to user installation because normal site-packages is not writeable\n",
|
584 |
+
"Collecting notion-client\n",
|
585 |
+
" Downloading notion_client-2.2.1-py2.py3-none-any.whl (13 kB)\n",
|
586 |
+
"Collecting httpx>=0.15.0\n",
|
587 |
+
" Using cached httpx-0.27.2-py3-none-any.whl (76 kB)\n",
|
588 |
+
"Requirement already satisfied: anyio in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx>=0.15.0->notion-client) (4.6.2.post1)\n",
|
589 |
+
"Requirement already satisfied: sniffio in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx>=0.15.0->notion-client) (1.3.1)\n",
|
590 |
+
"Collecting httpcore==1.*\n",
|
591 |
+
" Downloading httpcore-1.0.7-py3-none-any.whl (78 kB)\n",
|
592 |
+
"\u001b[K |████████████████████████████████| 78 kB 1.3 MB/s eta 0:00:01\n",
|
593 |
+
"\u001b[?25hRequirement already satisfied: certifi in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx>=0.15.0->notion-client) (2024.8.30)\n",
|
594 |
+
"Requirement already satisfied: idna in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from httpx>=0.15.0->notion-client) (3.10)\n",
|
595 |
+
"Collecting h11<0.15,>=0.13\n",
|
596 |
+
" Using cached h11-0.14.0-py3-none-any.whl (58 kB)\n",
|
597 |
+
"Collecting typing-extensions>=4.1\n",
|
598 |
+
" Using cached typing_extensions-4.12.2-py3-none-any.whl (37 kB)\n",
|
599 |
+
"Requirement already satisfied: exceptiongroup>=1.0.2 in /Users/sk4467/Library/Python/3.9/lib/python/site-packages (from anyio->httpx>=0.15.0->notion-client) (1.2.2)\n",
|
600 |
+
"Installing collected packages: typing-extensions, h11, httpcore, httpx, notion-client\n",
|
601 |
+
" Attempting uninstall: typing-extensions\n",
|
602 |
+
" Found existing installation: typing-extensions 3.10.0.2\n",
|
603 |
+
" Uninstalling typing-extensions-3.10.0.2:\n",
|
604 |
+
" Successfully uninstalled typing-extensions-3.10.0.2\n",
|
605 |
+
" Attempting uninstall: h11\n",
|
606 |
+
" Found existing installation: h11 0.9.0\n",
|
607 |
+
" Uninstalling h11-0.9.0:\n",
|
608 |
+
" Successfully uninstalled h11-0.9.0\n",
|
609 |
+
" Attempting uninstall: httpcore\n",
|
610 |
+
" Found existing installation: httpcore 0.10.2\n",
|
611 |
+
" Uninstalling httpcore-0.10.2:\n",
|
612 |
+
" Successfully uninstalled httpcore-0.10.2\n",
|
613 |
+
" Attempting uninstall: httpx\n",
|
614 |
+
" Found existing installation: httpx 0.14.3\n",
|
615 |
+
" Uninstalling httpx-0.14.3:\n",
|
616 |
+
" Successfully uninstalled httpx-0.14.3\n",
|
617 |
+
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
|
618 |
+
"tea-client 0.0.7 requires httpx[http2]~=0.14.2, but you have httpx 0.27.2 which is incompatible.\n",
|
619 |
+
"rich 9.11.1 requires typing-extensions<4.0.0,>=3.7.4, but you have typing-extensions 4.12.2 which is incompatible.\n",
|
620 |
+
"openai 1.54.3 requires pydantic<3,>=1.9.0, but you have pydantic 1.6.2 which is incompatible.\u001b[0m\n",
|
621 |
+
"Successfully installed h11-0.14.0 httpcore-1.0.7 httpx-0.27.2 notion-client-2.2.1 typing-extensions-4.12.2\n",
|
622 |
+
"\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.3.1 is available.\n",
|
623 |
+
"You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n",
|
624 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
625 |
+
]
|
626 |
+
}
|
627 |
+
],
|
628 |
+
"source": [
|
629 |
+
"%pip install notion-client"
|
630 |
+
]
|
631 |
+
},
|
632 |
+
{
|
633 |
+
"cell_type": "code",
|
634 |
+
"execution_count": 24,
|
635 |
+
"metadata": {},
|
636 |
+
"outputs": [],
|
637 |
+
"source": [
|
638 |
+
"from notion_client import Client\n",
|
639 |
+
"\n",
|
640 |
+
"# Initialize Notion client with your integration token\n",
|
641 |
+
"notion = Client(auth=\"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\")\n",
|
642 |
+
"\n",
|
643 |
+
"# Create a new database or append rows to an existing one\n",
|
644 |
+
"parent_page_id = \"148b2f92b9948099a854e8b21a0640a3\" # Replace with your parent page ID\n",
|
645 |
+
"\n",
|
646 |
+
"# Define properties of your new database\n",
|
647 |
+
"database_properties = {\n",
|
648 |
+
" \"Name\": {\"title\": {}},\n",
|
649 |
+
" \"Age\": {\"number\": {}},\n",
|
650 |
+
" \"Role\": {\"rich_text\": {}},\n",
|
651 |
+
"}\n",
|
652 |
+
"\n",
|
653 |
+
"# Create a new database\n",
|
654 |
+
"database = notion.databases.create(\n",
|
655 |
+
" parent={\"type\": \"page_id\", \"page_id\": parent_page_id},\n",
|
656 |
+
" title=[{\"type\": \"text\", \"text\": {\"content\": \"My JSON Table\"}}],\n",
|
657 |
+
" properties=database_properties,\n",
|
658 |
+
")\n",
|
659 |
+
"\n",
|
660 |
+
"database_id = database[\"id\"]\n",
|
661 |
+
"\n",
|
662 |
+
"# Add rows to the database\n",
|
663 |
+
"json_data = [\n",
|
664 |
+
" {\"Name\": \"Alice\", \"Age\": 25, \"Role\": \"Engineer\"},\n",
|
665 |
+
" {\"Name\": \"Bob\", \"Age\": 30, \"Role\": \"Designer\"}\n",
|
666 |
+
"]\n",
|
667 |
+
"\n",
|
668 |
+
"for row in json_data:\n",
|
669 |
+
" notion.pages.create(\n",
|
670 |
+
" parent={\"database_id\": database_id},\n",
|
671 |
+
" properties={\n",
|
672 |
+
" \"Name\": {\"title\": [{\"type\": \"text\", \"text\": {\"content\": row[\"Name\"]}}]},\n",
|
673 |
+
" \"Age\": {\"number\": row[\"Age\"]},\n",
|
674 |
+
" \"Role\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": row[\"Role\"]}}]},\n",
|
675 |
+
" }\n",
|
676 |
+
" )\n"
|
677 |
+
]
|
678 |
+
},
|
679 |
+
{
|
680 |
+
"cell_type": "code",
|
681 |
+
"execution_count": 41,
|
682 |
+
"metadata": {},
|
683 |
+
"outputs": [
|
684 |
+
{
|
685 |
+
"name": "stdout",
|
686 |
+
"output_type": "stream",
|
687 |
+
"text": [
|
688 |
+
"Database created with ID: 14db2f92-b994-81fb-9132-f4e4cb46ac13\n"
|
689 |
+
]
|
690 |
+
}
|
691 |
+
],
|
692 |
+
"source": [
|
693 |
+
"from notion_client import Client\n",
|
694 |
+
"\n",
|
695 |
+
"# Initialize Notion client\n",
|
696 |
+
"notion = Client(auth=\"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\")\n",
|
697 |
+
"\n",
|
698 |
+
"# Page ID where the database will be created\n",
|
699 |
+
"parent_page_id = \"148b2f92b9948099a854e8b21a0640a3\"\n",
|
700 |
+
"\n",
|
701 |
+
"# Define the database schema\n",
|
702 |
+
"database_schema = {\n",
|
703 |
+
" \"parent\": {\"type\": \"page_id\", \"page_id\": parent_page_id},\n",
|
704 |
+
" \"title\": [{\"type\": \"text\", \"text\": {\"content\": \"Task Dashboard\"}}],\n",
|
705 |
+
" \"properties\": {\n",
|
706 |
+
" \"Project Name\": {\"title\": {}},\n",
|
707 |
+
" \"Task ID\": {\"rich_text\": {}},\n",
|
708 |
+
" \"Description\": {\"rich_text\": {}},\n",
|
709 |
+
" \"Priority\": {\"select\": {\"options\": [\n",
|
710 |
+
" {\"name\": \"high\", \"color\": \"red\"},\n",
|
711 |
+
" {\"name\": \"medium\", \"color\": \"yellow\"},\n",
|
712 |
+
" {\"name\": \"low\", \"color\": \"green\"}\n",
|
713 |
+
" ]}},\n",
|
714 |
+
" \"Assigned To\": {\"rich_text\": {}},\n",
|
715 |
+
" \"Current Status\": {\"select\": {\"options\": [\n",
|
716 |
+
" {\"name\": \"completed\", \"color\": \"blue\"},\n",
|
717 |
+
" {\"name\": \"in progress\", \"color\": \"yellow\"},\n",
|
718 |
+
" {\"name\": \"pending\", \"color\": \"orange\"}\n",
|
719 |
+
" ]}},\n",
|
720 |
+
" \"Created At\": {\"date\": {}}\n",
|
721 |
+
" }\n",
|
722 |
+
"}\n",
|
723 |
+
"\n",
|
724 |
+
"# Create the database\n",
|
725 |
+
"response = notion.databases.create(**database_schema)\n",
|
726 |
+
"\n",
|
727 |
+
"# Print the database ID\n",
|
728 |
+
"print(\"Database created with ID:\", response[\"id\"])\n"
|
729 |
+
]
|
730 |
+
},
|
731 |
+
{
|
732 |
+
"cell_type": "markdown",
|
733 |
+
"metadata": {},
|
734 |
+
"source": [
|
735 |
+
"14db2f92-b994-8140-bee6-d4540d75c374\n",
|
736 |
+
"\n",
|
737 |
+
"14db2f92-b994-81fb-9132-f4e4cb46ac13"
|
738 |
+
]
|
739 |
+
},
|
740 |
+
{
|
741 |
+
"cell_type": "code",
|
742 |
+
"execution_count": 51,
|
743 |
+
"metadata": {},
|
744 |
+
"outputs": [],
|
745 |
+
"source": [
|
746 |
+
"from pymongo import DESCENDING\n",
|
747 |
+
"mongo_client = MongoClient(\"mongodb+srv://shahid:Protondev%[email protected]/\") # Replace with your MongoDB URI\n",
|
748 |
+
"db = mongo_client[\"task_management\"]\n",
|
749 |
+
"employee_project_collection = db[\"employee_project\"]\n",
|
750 |
+
"def fetch_latest_task_entry():\n",
|
751 |
+
" \"\"\"\n",
|
752 |
+
" Fetch the most recent entry from MongoDB.\n",
|
753 |
+
" Returns:\n",
|
754 |
+
" dict: The latest task entry as a dictionary.\n",
|
755 |
+
" \"\"\"\n",
|
756 |
+
" latest_entry = employee_project_collection.find_one(sort=[(\"created_at\", DESCENDING)])\n",
|
757 |
+
" if latest_entry:\n",
|
758 |
+
" return latest_entry\n",
|
759 |
+
" else:\n",
|
760 |
+
" raise ValueError(\"No entries found in MongoDB.\")"
|
761 |
+
]
|
762 |
+
},
|
763 |
+
{
|
764 |
+
"cell_type": "code",
|
765 |
+
"execution_count": 52,
|
766 |
+
"metadata": {},
|
767 |
+
"outputs": [],
|
768 |
+
"source": [
|
769 |
+
"latest_entry= fetch_latest_task_entry()"
|
770 |
+
]
|
771 |
+
},
|
772 |
+
{
|
773 |
+
"cell_type": "code",
|
774 |
+
"execution_count": 49,
|
775 |
+
"metadata": {},
|
776 |
+
"outputs": [],
|
777 |
+
"source": [
|
778 |
+
"notion = Client(auth=\"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\")\n",
|
779 |
+
"parent_page_id = \"148b2f92b9948099a854e8b21a0640a3\" \n",
|
780 |
+
"notion_database_id = \"14db2f92-b994-81fb-9132-f4e4cb46ac13\"\n",
|
781 |
+
"from datetime import datetime\n",
|
782 |
+
"def push_to_notion(latest_entry):\n",
|
783 |
+
" \"\"\"\n",
|
784 |
+
" Push tasks from the latest entry to the Notion database.\n",
|
785 |
+
" Args:\n",
|
786 |
+
" latest_entry (dict): The most recent task data from MongoDB.\n",
|
787 |
+
" \"\"\"\n",
|
788 |
+
" # Extract the tasks from the JSON\n",
|
789 |
+
" tasks = latest_entry.get(\"consolidated_final_task\", {})\n",
|
790 |
+
" created_at = latest_entry.get(\"created_at\", None)\n",
|
791 |
+
"\n",
|
792 |
+
" # Iterate over projects and their tasks\n",
|
793 |
+
" for project_name, task_list in tasks.items():\n",
|
794 |
+
" for task_id, task_details in task_list.items():\n",
|
795 |
+
" # Map MongoDB fields to Notion properties\n",
|
796 |
+
" notion_task = {\n",
|
797 |
+
" \"parent\": {\"database_id\": notion_database_id},\n",
|
798 |
+
" \"properties\": {\n",
|
799 |
+
" \"Project Name\": {\"title\": [{\"type\": \"text\", \"text\": {\"content\": project_name}}]},\n",
|
800 |
+
" \"Task ID\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": task_id}}]},\n",
|
801 |
+
" \"Description\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": task_details.get(\"description\", \"\")}}]},\n",
|
802 |
+
" \"Priority\": {\"select\": {\"name\": task_details.get(\"priority\", \"low\")}},\n",
|
803 |
+
" \"Assigned To\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": task_details.get(\"assigned_to\", \"\")}}]}, # Updated to rich_text\n",
|
804 |
+
" \"Current Status\": {\"select\": {\"name\": task_details.get(\"current_status\", \"pending\")}},\n",
|
805 |
+
" \"Created At\": {\"date\": {\"start\": created_at.isoformat() if created_at else datetime.utcnow().isoformat()}}\n",
|
806 |
+
" }\n",
|
807 |
+
" }\n",
|
808 |
+
"\n",
|
809 |
+
" # Push each task to Notion\n",
|
810 |
+
" try:\n",
|
811 |
+
" response = notion.pages.create(**notion_task)\n",
|
812 |
+
" print(f\"Task pushed to Notion: {response['id']}\")\n",
|
813 |
+
" except Exception as e:\n",
|
814 |
+
" print(f\"Failed to push task {task_id} to Notion: {e}\")"
|
815 |
+
]
|
816 |
+
},
|
817 |
+
{
|
818 |
+
"cell_type": "code",
|
819 |
+
"execution_count": 53,
|
820 |
+
"metadata": {},
|
821 |
+
"outputs": [],
|
822 |
+
"source": [
|
823 |
+
"def push_to_notion(latest_entry):\n",
|
824 |
+
" \"\"\"\n",
|
825 |
+
" Push tasks from the latest entry to the Notion database.\n",
|
826 |
+
" Args:\n",
|
827 |
+
" latest_entry (dict): The most recent task data from MongoDB.\n",
|
828 |
+
" \"\"\"\n",
|
829 |
+
" # Extract the tasks from the JSON\n",
|
830 |
+
" tasks = latest_entry.get(\"consolidated_final_task\", {})\n",
|
831 |
+
" created_at = latest_entry.get(\"created_at\", None)\n",
|
832 |
+
"\n",
|
833 |
+
" # Step 1: Clear existing tasks in Notion database\n",
|
834 |
+
" \n",
|
835 |
+
" try:\n",
|
836 |
+
" # Query all pages in the Notion database (this will fetch the existing tasks)\n",
|
837 |
+
" notion_database = notion.databases.query(database_id=notion_database_id)\n",
|
838 |
+
" \n",
|
839 |
+
" # Loop through the database pages and delete them\n",
|
840 |
+
" for page in notion_database['results']:\n",
|
841 |
+
" notion.pages.update(page_id=page['id'], archived=True)\n",
|
842 |
+
" print(\"Old tasks removed from Notion successfully.\")\n",
|
843 |
+
" except Exception as e:\n",
|
844 |
+
" print(f\"Failed to clear tasks in Notion: {e}\")\n",
|
845 |
+
"\n",
|
846 |
+
"# Step 2: Push new tasks to Notion\n",
|
847 |
+
" try:\n",
|
848 |
+
" # Iterate over projects and their tasks\n",
|
849 |
+
" for project_name, task_list in tasks.items():\n",
|
850 |
+
" for task_id, task_details in task_list.items():\n",
|
851 |
+
" # Map MongoDB fields to Notion properties\n",
|
852 |
+
" notion_task = {\n",
|
853 |
+
" \"parent\": {\"database_id\": notion_database_id},\n",
|
854 |
+
" \"properties\": {\n",
|
855 |
+
" \"Project Name\": {\"title\": [{\"type\": \"text\", \"text\": {\"content\": project_name}}]},\n",
|
856 |
+
" \"Task ID\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": task_id}}]},\n",
|
857 |
+
" \"Description\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": task_details.get(\"description\", \"\")}}]},\n",
|
858 |
+
" \"Priority\": {\"select\": {\"name\": task_details.get(\"priority\", \"low\")}},\n",
|
859 |
+
" \"Assigned To\": {\"rich_text\": [{\"type\": \"text\", \"text\": {\"content\": task_details.get(\"assigned_to\", \"\")}}]}, # Updated to rich_text\n",
|
860 |
+
" \"Current Status\": {\"select\": {\"name\": task_details.get(\"current_status\", \"pending\")}},\n",
|
861 |
+
" \"Created At\": {\"date\": {\"start\": created_at.isoformat() if created_at else datetime.utcnow().isoformat()}}\n",
|
862 |
+
" }\n",
|
863 |
+
" }\n",
|
864 |
+
"\n",
|
865 |
+
" # Push each task to Notion\n",
|
866 |
+
" response = notion.pages.create(**notion_task)\n",
|
867 |
+
" print(f\"Task pushed to Notion: {response['id']}\")\n",
|
868 |
+
" except Exception as e:\n",
|
869 |
+
" print(f\"Failed to push tasks to Notion: {e}\")\n"
|
870 |
+
]
|
871 |
+
},
|
872 |
+
{
|
873 |
+
"cell_type": "code",
|
874 |
+
"execution_count": 54,
|
875 |
+
"metadata": {},
|
876 |
+
"outputs": [
|
877 |
+
{
|
878 |
+
"name": "stdout",
|
879 |
+
"output_type": "stream",
|
880 |
+
"text": [
|
881 |
+
"Failed to clear tasks in Notion: 'PagesEndpoint' object has no attribute 'delete'\n",
|
882 |
+
"Task pushed to Notion: 14db2f92-b994-81e4-b057-f312d91ce256\n",
|
883 |
+
"Task pushed to Notion: 14db2f92-b994-8130-a8a8-d6c5933a5ddf\n",
|
884 |
+
"Task pushed to Notion: 14db2f92-b994-816e-8816-d17e67a74c93\n",
|
885 |
+
"Task pushed to Notion: 14db2f92-b994-812e-b042-ea13beed124b\n",
|
886 |
+
"Task pushed to Notion: 14db2f92-b994-8110-bf20-ff01d43b2e57\n",
|
887 |
+
"Task pushed to Notion: 14db2f92-b994-8187-8fee-ee2477162d77\n",
|
888 |
+
"Task pushed to Notion: 14db2f92-b994-81df-b592-d5f838200d56\n",
|
889 |
+
"Task pushed to Notion: 14db2f92-b994-811b-8fb5-d2221492bb52\n",
|
890 |
+
"Task pushed to Notion: 14db2f92-b994-819d-b72c-cb380ffaa08c\n",
|
891 |
+
"Task pushed to Notion: 14db2f92-b994-81f5-be9a-ead09cd9eac5\n",
|
892 |
+
"Task pushed to Notion: 14db2f92-b994-81bd-8e24-f3a789c0ad37\n",
|
893 |
+
"Task pushed to Notion: 14db2f92-b994-816d-a458-ec4af0da65c7\n",
|
894 |
+
"Task pushed to Notion: 14db2f92-b994-813d-bbbd-dd17295e98bd\n",
|
895 |
+
"Task pushed to Notion: 14db2f92-b994-81c0-b7e5-f60003ca6d8b\n",
|
896 |
+
"Task pushed to Notion: 14db2f92-b994-8155-af5d-d4e674bd73fe\n",
|
897 |
+
"Task pushed to Notion: 14db2f92-b994-8102-899e-dcd990824e7f\n",
|
898 |
+
"Task pushed to Notion: 14db2f92-b994-81a0-9e10-ebb6e3578d4a\n",
|
899 |
+
"Task pushed to Notion: 14db2f92-b994-8114-a190-d8f23b824d6e\n"
|
900 |
+
]
|
901 |
+
}
|
902 |
+
],
|
903 |
+
"source": [
|
904 |
+
"push_to_notion(latest_entry)"
|
905 |
+
]
|
906 |
+
},
|
907 |
+
{
|
908 |
+
"cell_type": "code",
|
909 |
+
"execution_count": 30,
|
910 |
+
"metadata": {},
|
911 |
+
"outputs": [
|
912 |
+
{
|
913 |
+
"ename": "AttributeError",
|
914 |
+
"evalue": "'function' object has no attribute 'get'",
|
915 |
+
"output_type": "error",
|
916 |
+
"traceback": [
|
917 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
918 |
+
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
|
919 |
+
"Cell \u001b[0;32mIn[30], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m tasks \u001b[38;5;241m=\u001b[39m \u001b[43mfinal_taks\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mconsolidated_final_task\u001b[39m\u001b[38;5;124m\"\u001b[39m, {})\n\u001b[1;32m 2\u001b[0m created_at \u001b[38;5;241m=\u001b[39m final_taks\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcreated_at\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n",
|
920 |
+
"\u001b[0;31mAttributeError\u001b[0m: 'function' object has no attribute 'get'"
|
921 |
+
]
|
922 |
+
}
|
923 |
+
],
|
924 |
+
"source": [
|
925 |
+
"tasks = final_taks.get(\"consolidated_final_task\", {})\n",
|
926 |
+
"created_at = final_taks.get(\"created_at\", None)"
|
927 |
+
]
|
928 |
+
},
|
929 |
+
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<function __main__.fetch_latest_task_entry()>"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "final_taks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pymongo import DESCENDING\n",
    "\n",
    "def fetch_recent_two_entries():\n",
    "    \"\"\"\n",
    "    Fetch the two most recent entries from the weekly_tasks collection\n",
    "    based on the created_at timestamp.\n",
    "\n",
    "    Returns:\n",
    "        list: A list of the two most recent documents from the collection.\n",
    "    \"\"\"\n",
    "    db = get_database()\n",
    "    collection = db[\"weekly_tasks\"]\n",
    "\n",
    "    # Query to fetch the two most recent entries\n",
    "    recent_entries = list(\n",
    "        collection.find().sort(\"created_at\", DESCENDING).limit(2)\n",
    "    )\n",
    "    return recent_entries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'_id': ObjectId('6746fead3d4e2084b16de53d'), 'week': '2024_Week_48', 'unique_id': '79f4c789-31d9-493b-948b-1a714935de86', 'tasks': {'Bonnie Plans': {'Task-1': {'description': 'Vivek mentioned that everything needed from Bonnie Plans is completed', 'priority': 'high', 'assigned_to': 'Nikate', 'current_status': 'completed'}}, 'RAG Article and Blog': {'Task-1': {'description': 'Add content to the RAG article and blog, and seek feedback', 'priority': 'high', 'assigned_to': 'Shahid S', 'current_status': 'in progress'}, 'Task-2': {'description': 'Review RAG documentation and provide feedback', 'priority': 'high', 'assigned_to': 'Vivek', 'current_status': 'pending'}}, 'G Copilot Case Study': {'Task-1': {'description': 'Prepare G Copilot case study based on templates', 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'in progress'}, 'Task-2': {'description': 'Provide feedback on G Copilot case study', 'priority': 'medium', 'assigned_to': 'Vivek', 'current_status': 'pending'}}, 'Internal Tool': {'Task-1': {'description': 'Conceptualize internal tool and present high-level ideas', 'priority': 'low', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, \"Jaspreet's Projects\": {'Task-1': {'description': \"Access Github repo and review Jaspreet's code\", 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, 'Washington Government Project': {'Task-1': {'description': 'Write a high-level case study for the Washington Government project', 'priority': 'high', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, 'Marketing and Website Development': {'Task-1': {'description': 'Scope the requirements for the new website development', 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, 'BFSI and AI Use Cases': {'Task-1': {'description': 'Discuss BFSI use cases and explore edge cases in AI implementations', 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'in progress'}}}, 'created_at': datetime.datetime(2024, 11, 27, 16, 42, 45, 178000)}\n",
      "{'_id': ObjectId('6746feaa3d4e2084b16de53b'), 'week': '2024_Week_48', 'unique_id': '2cf272c8-be42-423c-bba2-75f0202499b7', 'tasks': {'Bonnie Plans': {'Task-1': {'description': 'Vivek mentioned that everything needed from Bonnie Plans is completed', 'priority': 'high', 'assigned_to': 'Nikate', 'current_status': 'completed'}}, 'RAG Article and Blog': {'Task-1': {'description': 'Add content to the RAG article and blog, and seek feedback', 'priority': 'high', 'assigned_to': 'Shahid S', 'current_status': 'in progress'}, 'Task-2': {'description': 'Review RAG documentation and provide feedback', 'priority': 'high', 'assigned_to': 'Vivek', 'current_status': 'pending'}}, 'G Copilot Case Study': {'Task-1': {'description': 'Prepare G Copilot case study based on templates', 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'in progress'}, 'Task-2': {'description': 'Provide feedback on G Copilot case study', 'priority': 'medium', 'assigned_to': 'Vivek', 'current_status': 'pending'}}, 'Internal Tool': {'Task-1': {'description': 'Conceptualize internal tool and present high-level ideas', 'priority': 'low', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, \"Jaspreet's Projects\": {'Task-1': {'description': \"Access Github repo and review Jaspreet's code\", 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, 'Washington Government Project': {'Task-1': {'description': 'Write a high-level case study for the Washington Government project', 'priority': 'high', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, 'Marketing and Website Development': {'Task-1': {'description': 'Scope the requirements for the new website development', 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'pending'}}, 'BFSI and AI Use Cases': {'Task-1': {'description': 'Discuss BFSI use cases and explore edge cases in AI implementations', 'priority': 'medium', 'assigned_to': 'Shahid S', 'current_status': 'in progress'}}}, 'created_at': datetime.datetime(2024, 11, 27, 16, 42, 42, 919000)}\n"
     ]
    }
   ],
   "source": [
    "recent_tasks = fetch_recent_two_entries()\n",
    "for task in recent_tasks:\n",
    "    print(task)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Compare the two most recent weekly task entries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pymongo import DESCENDING\n",
    "\n",
    "db = get_database()\n",
    "collection = db[\"weekly_tasks\"]\n",
    "\n",
    "# Query to fetch the two most recent entries\n",
    "recent_entries = list(\n",
    "    collection.find().sort(\"created_at\", DESCENDING).limit(2)\n",
    ")\n",
    "\n",
    "# Extract task data from the entries\n",
    "old_task_data = recent_entries[1][\"tasks\"]  # Older entry\n",
    "new_task_data = recent_entries[0][\"tasks\"]  # Newer entry"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from openai import OpenAI\n",
    "\n",
    "def compare_task_data(old_task_data, new_task_data):\n",
    "    \"\"\"\n",
    "    Send old and new task data to the LLM for comparison.\n",
    "\n",
    "    Args:\n",
    "        old_task_data (dict): JSON data for the older tasks.\n",
    "        new_task_data (dict): JSON data for the newer tasks.\n",
    "\n",
    "    Returns:\n",
    "        dict: Consolidated JSON with updates and new tasks.\n",
    "    \"\"\"\n",
    "    # Prepare the prompt\n",
    "    prompt = f\"\"\"\n",
    "\n",
    "    Given the following two sets of task JSON data, compare them and:\n",
    "\n",
    "    1. Identify projects and tasks present in the second JSON but not in the first.\n",
    "       - If two projects have different names but are contextually similar (e.g., due to spelling differences or overlapping tasks), treat them as the same project and merge their tasks.\n",
    "\n",
    "    2. For tasks that exist in both JSONs within the same project:\n",
    "       - Compare the following fields:\n",
    "         - \"description\"\n",
    "         - \"priority\"\n",
    "         - \"assigned_to\"\n",
    "         - \"current_status\"\n",
    "       - If any changes are detected in these fields, update the task details in the output.\n",
    "\n",
    "    3. If a project or task in the second JSON contains new tasks or subtasks not present in the first JSON:\n",
    "       - Add those tasks or subtasks to the corresponding project in the output.\n",
    "\n",
    "    4. Ensure the final JSON structure meets the following conditions:\n",
    "       - Each project appears only once in the JSON.\n",
    "       - All tasks are uniquely represented under their respective projects.\n",
    "       - Updates to tasks (e.g., changes in \"priority\", \"assigned_to\", or \"current_status\") are applied.\n",
    "       - Tasks or subtasks are not duplicated across the output.\n",
    "\n",
    "    FIRST TASK DATA:\n",
    "    '''\n",
    "    {old_task_data}\n",
    "    '''\n",
    "\n",
    "    SECOND TASK DATA:\n",
    "    '''\n",
    "    {new_task_data}\n",
    "    '''\n",
    "\n",
    "    Expected Output:\n",
    "    A single consolidated JSON structure where:\n",
    "    - Projects are uniquely represented and merged based on contextual similarity.\n",
    "    - Each project contains all relevant tasks, including updates and newly added ones.\n",
    "    - All tasks follow this structure:\n",
    "\n",
    "    Return a single consolidated JSON structure with:\n",
    "    {{\n",
    "        \"project_name_1\": {{\n",
    "            \"Task-1\": {{\n",
    "                \"description\": \"Brief description of the task\",\n",
    "                \"priority\": \"high/medium/low\",\n",
    "                \"assigned_to\": \"Person responsible\",\n",
    "                \"current_status\": \"Status of the task (e.g., completed, in progress, pending)\"\n",
    "            }},\n",
    "            \"Task-2\": {{\n",
    "                \"description\": \"Brief description of the task\",\n",
    "                \"priority\": \"high/medium/low\",\n",
    "                \"assigned_to\": \"Person responsible\",\n",
    "                \"current_status\": \"Status of the task (e.g., completed, in progress, pending)\"\n",
    "            }}\n",
    "        }},\n",
    "        \"project_name_2\": {{\n",
    "            \"Task-1\": {{\n",
    "                \"description\": \"Brief description of the task\",\n",
    "                \"priority\": \"high/medium/low\",\n",
    "                \"assigned_to\": \"Person responsible\",\n",
    "                \"current_status\": \"Status of the task (e.g., completed, in progress, pending)\"\n",
    "            }}\n",
    "        }}\n",
    "    }}\n",
    "    \"\"\"\n",
    "\n",
    "    # Read the API key from the environment rather than hardcoding it\n",
    "    client = OpenAI(api_key=os.environ[\"OPENAI_API_KEY\"])\n",
    "\n",
    "    completion = client.chat.completions.create(\n",
    "        model=\"gpt-4o\",\n",
    "        messages=[{\"role\": \"user\", \"content\": prompt}],\n",
    "        # stream=True,\n",
    "    )\n",
    "    raw_response = completion.choices[0].message.content\n",
    "    # final_response = extract_json_from_raw_response(raw_response)\n",
    "    return raw_response"
   ]
  },
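  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`extract_json_from_raw_response` is referenced above but commented out and not defined in this notebook. A minimal sketch of what it could look like, assuming the model wraps its JSON in a Markdown ```json fence (as in the output further below):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import re\n",
    "\n",
    "def extract_json_from_raw_response(raw_response):\n",
    "    # Sketch: pull the payload out of a ```json ... ``` fence if present,\n",
    "    # otherwise assume the reply is already bare JSON.\n",
    "    match = re.search(r\"```(?:json)?\\s*(.*?)```\", raw_response, re.DOTALL)\n",
    "    payload = match.group(1) if match else raw_response\n",
    "    return json.loads(payload)"
   ]
  },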
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "consolidated_json = compare_task_data(old_task_data, new_task_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Here is the consolidated JSON structure after comparing and merging the two sets of task data:\n",
      "\n",
      "```json\n",
      "{\n",
      "  \"Bonnie Plans\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Complete the Bony Plants project.\",\n",
      "      \"priority\": \"high\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"completed\"\n",
      "    }\n",
      "  },\n",
      "  \"RAG Article and Blog\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Add content to the RAG article and blog, and seek feedback\",\n",
      "      \"priority\": \"high\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"in progress\"\n",
      "    },\n",
      "    \"Task-2\": {\n",
      "      \"description\": \"Review RAG documentation and provide feedback\",\n",
      "      \"priority\": \"high\",\n",
      "      \"assigned_to\": \"Vivek\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  },\n",
      "  \"G Copilot Case Study\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Draft the initial case study document for G Copilot.\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"pending\"\n",
      "    },\n",
      "    \"Task-2\": {\n",
      "      \"description\": \"Provide feedback on G Copilot case study\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Vivek\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  },\n",
      "  \"Internal Tool\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Conceptualize internal tool and present high-level ideas\",\n",
      "      \"priority\": \"low\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  },\n",
      "  \"Jaspreet's Projects\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Access Github repo and review Jaspreet's code\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  },\n",
      "  \"Washington Government Project\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Submit the project for review to the Applore team.\",\n",
      "      \"priority\": \"high\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"in progress\"\n",
      "    },\n",
      "    \"Task-2\": {\n",
      "      \"description\": \"Ensure reminders are sent daily to follow up on the review.\",\n",
      "      \"priority\": \"high\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  },\n",
      "  \"Marketing and Website Development\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Create a high-level roadmap and plan milestones for the project.\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"in progress\"\n",
      "    }\n",
      "  },\n",
      "  \"BFSI and AI Use Cases\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Discuss BFSI use cases and explore edge cases in AI implementations\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"in progress\"\n",
      "    }\n",
      "  },\n",
      "  \"S3 R3 Project\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Discuss RTF S3 R3 alarm with the team once feedback is received.\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  },\n",
      "  \"Grant Engine and RAG Solution\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Draft the use cases document for the Grant Engine and RAG Solution.\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"in progress\"\n",
      "    }\n",
      "  },\n",
      "  \"Internal LLM Comparison Tool\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Define metrics and process for comparing LLMs using business problem statements.\",\n",
      "      \"priority\": \"medium\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"in progress\"\n",
      "    }\n",
      "  },\n",
      "  \"Task Manager and Plugins\": {\n",
      "    \"Task-1\": {\n",
      "      \"description\": \"Develop a task manager prototype for better project management and communication.\",\n",
      "      \"priority\": \"high\",\n",
      "      \"assigned_to\": \"Shahid S\",\n",
      "      \"current_status\": \"pending\"\n",
      "    }\n",
      "  }\n",
      "}\n",
      "```\n",
      "\n",
      "### Explanation\n",
      "1. **Project merging based on contextual similarity**:\n",
      "   - \"Bonnie Plans\" and \"Bony Plants\" were treated as the same project based on spelling similarity.\n",
      "\n",
      "2. **Task merging and updating**:\n",
      "   - Tasks were merged and updated based on field changes in descriptions, priority, assigned_to, and current_status.\n",
      "\n",
      "3. **Addition of new projects and tasks**:\n",
      "   - New projects and tasks from the second JSON that were not present in the first JSON were added to the consolidated output.\n"
     ]
    }
   ],
   "source": [
    "print(consolidated_json)"
   ]
  },
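  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The consolidated reply above arrives as Markdown-fenced JSON, so it can be parsed with the `extract_json_from_raw_response` sketch defined earlier (assuming that cell has been run):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "consolidated_tasks = extract_json_from_raw_response(consolidated_json)\n",
    "list(consolidated_tasks.keys())"
   ]
  },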
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
test.py
ADDED
@@ -0,0 +1,27 @@
import streamlit as st

# Initialize session state to track the active tab
if 'active_tab' not in st.session_state:
    st.session_state.active_tab = 0  # Set default tab (0 for first tab)

# Create tabs using st.radio or st.selectbox
tab_options = ['Tab 1', 'Tab 2', 'Tab 3']
selected_tab = st.radio('Select a tab', tab_options, index=st.session_state.active_tab)

# Some condition to trigger the tab change (for example, based on user input or action)
def some_condition():
    # Define your condition, for example, a button press or other condition
    return st.button('Switch to Tab 2')

# Conditionally switch tabs based on a condition
if some_condition():
    st.session_state.active_tab = 1  # Switch to Tab 2 (index 1)
    st.rerun()  # Re-run so the radio above picks up the new index

# Render content based on the selected tab
if selected_tab == 'Tab 1':
    st.write("You are on Tab 1")
elif selected_tab == 'Tab 2':
    st.write("You are on Tab 2")
else:
    st.write("You are on Tab 3")