# api.py
import logging

import openai

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class AutoAPI:
    def __init__(self, api_key, ai_name, ai_role, top_5_goals):
        self.api_key = api_key
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.top_5_goals = top_5_goals
        openai.api_key = self.api_key

    def infer_tasks(self, description):
        prompt = (
            f"Based on the role '{self.ai_role}' and the description "
            f"'{description}', infer five delegated tasks that encompass all "
            f"tasks and subtasks related to the deployment of the project."
        )
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",  # or the model you are using
                messages=[{"role": "user", "content": prompt}],
                max_tokens=150,
            )
            tasks = response['choices'][0]['message']['content']
            # Assuming tasks are returned as a newline-separated string
            return tasks.split('\n')
        except Exception as e:
            logger.error("Error inferring tasks: %s", str(e))
            return ["Error inferring tasks. Please check the API key and input."]
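

# Example usage (a minimal sketch, not part of the original module): the AI
# name, role, goals, and project description below are hypothetical, and the
# OpenAI key is assumed to be available in the OPENAI_API_KEY environment
# variable. Note that AutoAPI uses the pre-1.0 openai SDK interface
# (openai.ChatCompletion), so this should be run with openai<1.0.
if __name__ == "__main__":
    import os

    auto_api = AutoAPI(
        api_key=os.environ["OPENAI_API_KEY"],
        ai_name="DeployGPT",  # hypothetical name
        ai_role="a DevOps assistant",  # hypothetical role
        top_5_goals=["Ship the project to production"],  # hypothetical goals
    )
    # Print the inferred tasks, one per line.
    for task in auto_api.infer_tasks("containerize and deploy a Flask app"):
        print(task)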