|
from typing import Optional, List, Dict, Any |
|
import openai |
|
import anthropic |
|
from dataclasses import dataclass |
|
from config.llm_settings import LLMSettings |
|
from core.file_scanner import FileInfo |
|
|
|
@dataclass
class Message:
    """A single turn in the LLM conversation history."""

    # Chat role as understood by both provider APIs, e.g. "user" or "assistant".
    role: str
    # The message text sent to, or received from, the model.
    content: str
|
|
|
class LLMService:
    """Chat wrapper around the Anthropic / OpenAI APIs for repository Q&A.

    Keeps a bounded conversation history (at most ``MAX_TURNS`` user/assistant
    exchanges) and dispatches requests to whichever model is currently
    selected via :meth:`switch_model`.
    """

    # Maximum number of user/assistant exchange turns retained in history.
    MAX_TURNS = 5

    def __init__(self):
        """Initialize the service from :class:`LLMSettings`.

        API clients are created only for providers whose key is configured.
        ``claude_client`` is explicitly ``None`` when no Anthropic key is
        present so that a misconfiguration surfaces as a clear error in
        :meth:`get_response` instead of an opaque ``AttributeError``.
        """
        self.settings = LLMSettings()
        self.current_model = self.settings.default_llm

        self.claude_client = None
        if self.settings.anthropic_api_key:
            self.claude_client = anthropic.Anthropic(api_key=self.settings.anthropic_api_key)
        if self.settings.openai_api_key:
            # NOTE(review): module-level key assignment is the legacy (<1.0)
            # openai API style — confirm the pinned openai package version.
            openai.api_key = self.settings.openai_api_key

        self.conversation_history: List[Message] = []

    def switch_model(self, model: str):
        """Switch the model used for subsequent requests.

        Raises:
            ValueError: if *model* is not in ``settings.get_available_models()``.
        """
        if model not in self.settings.get_available_models():
            raise ValueError(f"モデル {model} は利用できません")
        self.current_model = model

    def create_prompt(self, content: str, query: str) -> str:
        """Build the analysis prompt embedding *content* and *query*."""
        return f"""以下はGitHubリポジトリのコード解析結果です。このコードについて質問に答えてください。

コード解析結果:
{content}

質問: {query}

できるだけ具体的に、コードの内容を参照しながら回答してください。"""

    def _add_to_history(self, role: str, content: str):
        """Append a message, trimming history to the last MAX_TURNS exchanges."""
        self.conversation_history.append(Message(role=role, content=content))

        # Each turn is a user + assistant pair, hence MAX_TURNS * 2 messages.
        if len(self.conversation_history) > self.MAX_TURNS * 2:
            self.conversation_history = self.conversation_history[-self.MAX_TURNS * 2:]

    def _format_messages_for_claude(self) -> List[Dict[str, str]]:
        """Format the history for the Anthropic Messages API."""
        return [{"role": msg.role, "content": msg.content}
                for msg in self.conversation_history]

    def _format_messages_for_gpt(self) -> List[Dict[str, str]]:
        """Format the history for the OpenAI chat API, prepending the system role."""
        return [
            {"role": "system", "content": "あなたはコードアナリストとして、リポジトリの解析と質問への回答を行います。"},
            *[{"role": msg.role, "content": msg.content}
              for msg in self.conversation_history]
        ]

    def get_conversation_history(self) -> List[Dict[str, str]]:
        """Return the history as a list of ``{"role", "content"}`` dicts."""
        return [{"role": msg.role, "content": msg.content}
                for msg in self.conversation_history]

    def clear_history(self):
        """Discard all conversation history."""
        self.conversation_history = []

    def get_response(self, content: str, query: str) -> tuple[Optional[str], Optional[str]]:
        """Generate an answer for *query* about *content* with the current LLM.

        Returns:
            ``(answer, None)`` on success, ``(None, error_message)`` on failure.
        """
        prompt = self.create_prompt(content, query)
        self._add_to_history("user", prompt)

        try:
            if self.current_model == 'claude':
                if self.claude_client is None:
                    # Fail with a clear message instead of AttributeError.
                    raise RuntimeError("Anthropic API key is not configured")
                response = self.claude_client.messages.create(
                    model="claude-3-sonnet-20240229",
                    max_tokens=4000,
                    messages=self._format_messages_for_claude()
                )
                answer = response.content[0].text
            else:
                # NOTE(review): openai.ChatCompletion is the legacy (<1.0) API;
                # confirm the installed openai version before migrating.
                response = openai.ChatCompletion.create(
                    model="gpt-4o",
                    messages=self._format_messages_for_gpt()
                )
                answer = response.choices[0].message.content

            self._add_to_history("assistant", answer)
            return answer, None

        except Exception as e:
            # Drop the failed user turn so a retry does not send a dangling
            # user message with no assistant reply in between.
            self.conversation_history.pop()
            return None, f"エラーが発生しました: {str(e)}"

    @staticmethod
    def format_code_content(files: List["FileInfo"]) -> str:
        """Format scanned files into a single prompt-ready text blob."""
        formatted_content = []
        for file_info in files:
            formatted_content.append(
                f"#ファイルパス\n{file_info.path}\n------------\n{file_info.content}\n"
            )
        return "\n".join(formatted_content)