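"""Prompt checker for neollm.

Sends the messages recorded on a MyLLM / MyL3M2 instance to an LLM and asks it to point out
typos, logical gaps, and unclear instructions, and to propose an improved prompt.
`check_prompt()` is the public entry point.
"""
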
from __future__ import annotations

from typing import Any

from typing_extensions import TypedDict

from neollm import MyL3M2, MyLLM
from neollm.types import LLMSettings, Messages, Response

# Type-erased aliases so the checker can accept any MyLLM / MyL3M2 specialization.
_MyLLM = MyLLM[Any, Any]
_MyL3M2 = MyL3M2[Any, Any]


class PromptCheckerInput(TypedDict):
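    """Target MyLLM/MyL3M2 to review, plus the model/platform/LLM settings for the checker itself."""
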
    myllm: _MyLLM | _MyL3M2
    model: str
    platform: str
    llm_settings: LLMSettings | None


class APromptCheckerInput(TypedDict):
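    """A single MyLLM whose recorded messages will be reviewed."""
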
    myllm: _MyLLM


class APromptChecker(MyLLM[APromptCheckerInput, str]):
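    """Checks one MyLLM's prompt by asking an LLM to critique its recorded messages."""
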
    def _preprocess(self, inputs: APromptCheckerInput) -> Messages:
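        # System prompt (Japanese): tells the checker to list typos/grammar mistakes, illogical
        # points, and unclear instructions as bullet points, then propose a better prompt.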
        system_prompt = (
            "あなたは、AIへの指示(プロンプト)をより良くすることが仕事です。\n"
            "あなたは言語能力が非常に高く、仕事も丁寧なので小さなミスも気づくことができる天才です。"
            "誤字脱字・論理的でない点・指示が不明確な点を箇条書きで指摘し、より良いプロンプトを提案してください。\n"
            "# 出力例: \n"
            "[指示の誤字脱字/文法ミス]\n"
            "- ...\n"
            "- ...\n"
            "[指示が論理的でない点]\n"
            "- ...\n"
            "- ...\n"
            "[指示が不明確な点]\n"
            "- ...\n"
            "- ...\n"
            "[その他気になる点]\n"
            "- ...\n"
            "- ...\n"
            "[提案]\n"
            "- ...\n"
            "- ...\n"
        )
        if inputs["myllm"].messages is None:
            return []
        user_prompt = "# プロンプト\n" + "\n".join(
            # [f"<{message['role']}>\n{message['content']}\n" for message in inputs.messages]
            [str(message) for message in inputs["myllm"].messages]
        )
        messages: Messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
        return messages

    def _postprocess(self, response: Response) -> str:
        content = response.choices[0].message.content
        if content is None:
            return "(no content in the response)"
        return content

    def _ruleprocess(self, inputs: APromptCheckerInput) -> str | None:
        # Skip the LLM request when the target MyLLM has no recorded messages to review.
        if inputs["myllm"].messages is None:
            return "_ruleprocess triggered: the target MyLLM has no messages, so no request was sent."
        return None

    def __call__(self, inputs: APromptCheckerInput) -> str:
        outputs: str = super().__call__(inputs)
        return outputs


class PromptsChecker(MyL3M2[PromptCheckerInput, None]):
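    """Walks a MyLLM / MyL3M2 tree and runs APromptChecker on every leaf MyLLM's prompt."""
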
    def _link(self, inputs: PromptCheckerInput) -> None:
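        # Recurse into nested MyL3M2 children; run APromptChecker on each leaf MyLLM.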
        if isinstance(inputs["myllm"], MyL3M2):
            for myllm in inputs["myllm"].myllm_list:
                prompts_checker = PromptsChecker(parent=self, verbose=True)
                prompts_checker(
                    inputs={
                        "myllm": myllm,
                        "model": inputs["model"],
                        "platform": inputs["platform"],
                        "llm_settings": inputs["llm_settings"],
                    }
                )
        elif isinstance(inputs["myllm"], MyLLM):
            a_prompt_checker = APromptChecker(
                parent=self,
                llm_settings=inputs["llm_settings"],
                verbose=True,
                platform=inputs["platform"],
                model=inputs["model"],
            )
            a_prompt_checker(inputs={"myllm": inputs["myllm"]})

    def __call__(self, inputs: PromptCheckerInput) -> None:
        super().__call__(inputs)


def check_prompt(
    myllm: _MyLLM | _MyL3M2,
    llm_settings: LLMSettings | None = None,
    model: str = "gpt-3.5-turbo",
    platform: str = "openai",
) -> MyL3M2[Any, Any]:
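    """Review every prompt recorded on `myllm` (recursing into nested MyL3M2 children) with an LLM.

    Returns the PromptsChecker instance that ran the review.
    """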
    prompts_checker = PromptsChecker(verbose=True)
    prompts_checker(inputs={"myllm": myllm, "llm_settings": llm_settings, "model": model, "platform": platform})
    return prompts_checker
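

# Usage sketch (illustrative): `SummarizerLLM` is a hypothetical MyLLM subclass; any MyLLM or
# MyL3M2 that has already been called (so its `.messages` are recorded) can be checked.
#
#     summarizer = SummarizerLLM()
#     summarizer(inputs={...})  # run once so the prompt messages are populated
#     check_prompt(summarizer, model="gpt-3.5-turbo", platform="openai")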