File size: 2,630 Bytes
7a6c758
 
 
 
 
 
dc672d9
7a6c758
 
 
 
 
 
 
 
 
 
dc672d9
7a6c758
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dc672d9
7a6c758
 
 
 
dc672d9
7a6c758
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
"""
OpenAI GPT Implementation
"""
from openai import AsyncOpenAI
from typing import Dict, List, Any
from llm_interface import LLMInterface
from logger import log_info, log_error, log_warning, log_debug

class OpenAILLM(LLMInterface):
    """OpenAI GPT integration (GPT-4o, GPT-4o-mini).

    Thin async wrapper around the OpenAI Chat Completions API that
    conforms to the project's LLMInterface contract.
    """

    def __init__(self, api_key: str, model: str, settings: Dict[str, Any] = None):
        """Initialize the OpenAI client.

        Args:
            api_key: OpenAI API key.
            model: Provider-facing model alias (e.g. "gpt4o"); mapped to
                the actual OpenAI model name via _map_model_name().
            settings: Optional dict; recognized keys are "temperature"
                (default 0.7) and "max_tokens" (default 1000).
        """
        super().__init__(settings)
        self.client = AsyncOpenAI(api_key=api_key)
        self.model = self._map_model_name(model)
        # Normalize once so we don't repeat the None-check per setting.
        opts = settings or {}
        self.temperature = opts.get("temperature", 0.7)
        self.max_tokens = opts.get("max_tokens", 1000)
        log_info(f"πŸ€– Initialized OpenAI LLM with model: {self.model}")

    def _map_model_name(self, model: str) -> str:
        """Map a provider alias to the actual OpenAI model name.

        Unknown aliases pass through unchanged so callers may supply a
        raw OpenAI model name directly.
        """
        mappings = {
            # BUGFIX: "gpt4o" previously mapped to "gpt-4" (plain GPT-4),
            # contradicting the class docstring and the gpt4o-mini mapping.
            "gpt4o": "gpt-4o",
            "gpt4o-mini": "gpt-4o-mini"
        }
        return mappings.get(model, model)

    async def generate(self, system_prompt: str, user_input: str, context: List[Dict]) -> str:
        """Generate a response from OpenAI.

        Args:
            system_prompt: System message placed first in the conversation.
            user_input: Current user turn, appended last.
            context: Prior conversation turns; each dict may carry "role"
                (defaults to "user") and "content" (defaults to "").

        Returns:
            The assistant's reply text, stripped of surrounding whitespace.

        Raises:
            Re-raises any exception from the OpenAI client after logging.
        """
        try:
            # Conversation order: system prompt, prior context, current input.
            messages = [{"role": "system", "content": system_prompt}]
            for msg in context:
                messages.append({
                    "role": msg.get("role", "user"),
                    "content": msg.get("content", "")
                })
            messages.append({"role": "user", "content": user_input})

            response = await self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens
            )

            # content may be None (e.g. refusals / tool-only responses);
            # guard before .strip() to avoid AttributeError.
            content = response.choices[0].message.content
            return (content or "").strip()
        except Exception as e:
            log_error("❌ OpenAI error", e)
            raise

    async def startup(self, project_config: Dict) -> bool:
        """GPT needs no warm-up; always reports ready."""
        log_info("βœ… GPT provider ready (no startup needed)")
        return True

    def get_provider_name(self) -> str:
        """Return the resolved model name used as the provider label."""
        return self.model

    def get_model_info(self) -> Dict[str, Any]:
        """Return a summary of the provider, model, and generation settings."""
        return {
            "provider": "openai",
            "model": self.model,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens
        }