# Smart-Tasker / llm_integration / task_extraction.py
# Author: Shahid
# History: first commit (af30a30)
# llm_integration/task_extraction.py
import json
import os
import re

from openai import OpenAI
def extract_json_from_raw_response(raw_response):
    """
    Extract and parse the JSON payload embedded in a raw LLM response.

    Tries, in order:
      1. a ```json ... ``` fenced block,
      2. a generic ``` ... ``` fenced block (models often omit the language tag),
      3. the whole string as-is (some models return bare JSON with no fence).

    Args:
        raw_response (str): Raw response text from the LLM, possibly containing
            JSON plus surrounding prose.

    Returns:
        dict | list | None: The parsed JSON value, or None when nothing
        parseable was found (a diagnostic is printed in that case).
    """
    match = re.search(r"```json(.*?)```", raw_response, re.DOTALL)
    if match is None:
        # Fallback: accept an untagged code fence.
        match = re.search(r"```(.*?)```", raw_response, re.DOTALL)
    # Last resort: treat the entire response as the JSON candidate.
    json_string = match.group(1).strip() if match else raw_response.strip()
    if not json_string:
        print("No valid JSON block found in the response.")
        return None
    try:
        return json.loads(json_string)
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        return None
def extract_tasks_from_text(conversation_text, model="gpt-4o"):
    """Send conversation text to the LLM and extract tasks in JSON format.

    Args:
        conversation_text (str): Meeting/conversation transcript to mine for
            projects and tasks.
        model (str): Chat-completion model name. Defaults to "gpt-4o".

    Returns:
        dict | None: Projects keyed by name, each mapping task names to
        {"description", "priority", "assigned_to", "current_status"} fields,
        or None if the model response contained no parseable JSON.

    Raises:
        RuntimeError: If the OPENAI_API_KEY environment variable is not set.
    """
    # Define the prompt
    prompt = f"""
    Extract detailed project information from the following text and structure it in JSON format.
    The JSON should have each project as a main key, with tasks as subkeys. For each task, include
    the following fields: "description", "priority", "assigned_to", and "current_status".
    Use the conversation details to populate the values accurately.
    Text:
    '''
    {conversation_text}
    '''
    Expected JSON Output:
    {{
    "project_name_1": {{
    "Task-1": {{
    "description": "Brief description of the task",
    "priority": "high/medium/low",
    "assigned_to": "Person responsible",
    "current_status": "Status of the task (e.g., completed, in progress, pending)"
    }},
    "Task-2": {{
    "description": "Brief description of the task",
    "priority": "high/medium/low",
    "assigned_to": "Person responsible",
    "current_status": "Status of the task (e.g., completed, in progress, pending)"
    }}
    }},
    "project_name_2": {{
    "Task-1": {{
    "description": "Brief description of the task",
    "priority": "high/medium/low",
    "assigned_to": "Person responsible",
    "current_status": "Status of the task (e.g., completed, in progress, pending)"
    }}
    }}
    }}
    """
    # SECURITY: the key must come from the environment — never hardcode
    # credentials in source (the previous hardcoded key must be revoked).
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
    client = OpenAI(api_key=api_key)
    # Non-streaming call: one complete response object.
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    raw_response = response.choices[0].message.content
    return extract_json_from_raw_response(raw_response)