from typing import Literal, cast

from openai import OpenAI

from neollm.llm.abstract_llm import AbstractLLM
from neollm.llm.gpt.abstract_gpt import AbstractGPT
from neollm.types import (
    APIPricing,
    ClientSettings,
    LLMSettings,
    Messages,
    Response,
    StreamResponse,
)
from neollm.utils.utils import cprint

# Models: https://platform.openai.com/docs/models/continuous-model-upgrades
# Pricing: https://openai.com/pricing

SUPPORTED_MODELS = Literal[
    "gpt-4o-2024-05-13",
    "gpt-4-turbo-2024-04-09",
    "gpt-3.5-turbo-0125",
    "gpt-4-turbo-0125",
    "gpt-3.5-turbo-1106",
    "gpt-4-turbo-1106",
    "gpt-4v-turbo-1106",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k-0613",
    "gpt-4-0613",
    "gpt-4-32k-0613",
]


def get_openai_llm(model_name: SUPPORTED_MODELS | str, client_settings: ClientSettings) -> AbstractLLM:
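    """Return the LLM wrapper that corresponds to ``model_name``.

    Dated snapshot names map directly to their dedicated classes; undated aliases
    are resolved to a dated snapshot first (with a warning); fine-tuned model
    names are matched by the base model name they contain.
    """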
    # Resolve undated aliases to a dated snapshot
    replace_map_for_nodate: dict[str, SUPPORTED_MODELS] = {
        "gpt-4o": "gpt-4o-2024-05-13",
        "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
        "gpt-4": "gpt-4-0613",
        "gpt-4-32k": "gpt-4-32k-0613",
        "gpt-4-turbo": "gpt-4-turbo-1106",
        "gpt-4v-turbo": "gpt-4v-turbo-1106",
    }
    if model_name in replace_map_for_nodate:
        cprint("WARNING: model_nameに日付を指定してください", color="yellow", background=True)
        print(f"model_name: {model_name} -> {replace_map_for_nodate[model_name]}")
        model_name = replace_map_for_nodate[model_name]

    # map to LLM
    supported_model_map: dict[SUPPORTED_MODELS, AbstractLLM] = {
        "gpt-4o-2024-05-13": OpenAIGPT4O_20240513(client_settings),
        "gpt-4-turbo-2024-04-09": OpenAIGPT4T_20240409(client_settings),
        "gpt-3.5-turbo-0125": OpenAIGPT35T_0125(client_settings),
        "gpt-4-turbo-0125": OpenAIGPT4T_0125(client_settings),
        "gpt-3.5-turbo-1106": OpenAIGPT35T_1106(client_settings),
        "gpt-4-turbo-1106": OpenAIGPT4T_1106(client_settings),
        "gpt-4v-turbo-1106": OpenAIGPT4VT_1106(client_settings),
        "gpt-3.5-turbo-0613": OpenAIGPT35T_0613(client_settings),
        "gpt-3.5-turbo-16k-0613": OpenAIGPT35T16k_0613(client_settings),
        "gpt-4-0613": OpenAIGPT4_0613(client_settings),
        "gpt-4-32k-0613": OpenAIGPT432k_0613(client_settings),
    }
    # Standard (non-fine-tuned) models
    if model_name in supported_model_map:
        model_name = cast(SUPPORTED_MODELS, model_name)
        return supported_model_map[model_name]
    # Fine-tuned (FT) models
    if "gpt-3.5-turbo-1106" in model_name:
        return OpenAIGPT35TFT_1106(model_name, client_settings)
    if "gpt-3.5-turbo-0613" in model_name:
        return OpenAIGPT35TFT_0613(model_name, client_settings)
    if "gpt-3.5-turbo-0125" in model_name:
        return OpenAIGPT35TFT_0125(model_name, client_settings)
    if "gpt4" in model_name.replace("-", ""):  # TODO! もっといい条件に修正
        return OpenAIGPT4FT_0613(model_name, client_settings)

    cprint(
        f"WARNING: このFTモデルは何?: {model_name} -> OpenAIGPT35TFT_1106として設定", color="yellow", background=True
    )
    return OpenAIGPT35TFT_1106(model_name, client_settings)


class OpenAILLM(AbstractGPT):
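    """Shared wrapper around the OpenAI chat completions API used by the dated model classes below."""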
    model: str

    @property
    def client(self) -> OpenAI:
        client: OpenAI = OpenAI(**self.client_settings)
        # client_settings is unpacked into the OpenAI client and may include, e.g.:
        #   api_key: str | None = None
        #   timeout: httpx.Timeout(timeout=600.0, connect=5.0)
        #   max_retries: int = 2
        return client

    def generate(self, messages: Messages, llm_settings: LLMSettings) -> Response:
        openai_response = self.client.chat.completions.create(
            model=self.model,
            messages=self._convert_to_platform_messages(messages),
            stream=False,
            **llm_settings,
        )
        response = self._convert_to_response(openai_response)
        return response

    def generate_stream(self, messages: Messages, llm_settings: LLMSettings) -> StreamResponse:
        platform_stream_response = self.client.chat.completions.create(
            model=self.model,
            messages=self._convert_to_platform_messages(messages),
            stream=True,
            **llm_settings,
        )
        stream_response = self._convert_to_streamresponse(platform_stream_response)
        return stream_response
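
# A minimal usage sketch (hypothetical settings; assumes an OPENAI_API_KEY environment
# variable or an "api_key" entry in client_settings, and that Messages follows the
# OpenAI chat message format):
#
#   llm = get_openai_llm("gpt-4o-2024-05-13", client_settings={})
#   messages = [{"role": "user", "content": "Hello"}]
#   response = llm.generate(messages, llm_settings={"temperature": 0.0})
#   for chunk in llm.generate_stream(messages, llm_settings={"temperature": 0.0}):
#       ...  # StreamResponse is assumed to yield incremental chunks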


# omni 2024-05-13 --------------------------------------------------------------------------------------------
class OpenAIGPT4O_20240513(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.005, output=0.015)
    model: str = "gpt-4o-2024-05-13"
    context_window: int = 128_000


# 2024-04-09 --------------------------------------------------------------------------------------------
class OpenAIGPT4T_20240409(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.01, output=0.03)  # 10x input / 15x output vs. gpt-3.5-turbo-1106
    model: str = "gpt-4-turbo-2024-04-09"
    # model: str = "gpt-4-turbo-2024-04-09"
    context_window: int = 128_000


# 0125 --------------------------------------------------------------------------------------------
class OpenAIGPT35T_0125(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.0005, output=0.0015)
    model: str = "gpt-3.5-turbo-0125"
    context_window: int = 16_385


class OpenAIGPT4T_0125(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.01, output=0.03)
    model: str = "gpt-4-0125-preview"
    context_window: int = 128_000


class OpenAIGPT35TFT_0125(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.003, output=0.006)
    context_window: int = 16_385

    def __init__(self, model_name: str, client_setting: ClientSettings) -> None:
        super().__init__(client_setting)
        self.model = model_name


# 1106 --------------------------------------------------------------------------------------------
class OpenAIGPT35T_1106(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.0010, output=0.0020)
    model: str = "gpt-3.5-turbo-1106"
    context_window: int = 16_385


class OpenAIGPT4T_1106(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.01, output=0.03)
    model: str = "gpt-4-1106-preview"
    context_window: int = 128_000


class OpenAIGPT4VT_1106(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.01, output=0.03)
    model: str = "gpt-4-1106-vision-preview"
    context_window: int = 128_000


class OpenAIGPT35TFT_1106(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.003, output=0.006)
    context_window: int = 4_096

    def __init__(self, model_name: str, client_setting: ClientSettings) -> None:
        super().__init__(client_setting)
        self.model = model_name


# 0613 --------------------------------------------------------------------------------------------
class OpenAIGPT35T_0613(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.0015, output=0.002)
    model: str = "gpt-3.5-turbo-0613"
    context_window: int = 4_096


class OpenAIGPT35T16k_0613(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.003, output=0.004)
    model: str = "gpt-3.5-turbo-16k-0613"
    context_window: int = 16_385


class OpenAIGPT4_0613(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.03, output=0.06)
    model: str = "gpt-4-0613"
    context_window: int = 8_192


class OpenAIGPT432k_0613(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.06, output=0.12)
    model: str = "gpt-4-32k-0613"
    context_window: int = 32_768


class OpenAIGPT35TFT_0613(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.003, output=0.006)
    context_window: int = 4_096

    def __init__(self, model_name: str, client_setting: ClientSettings) -> None:
        super().__init__(client_setting)
        self.model = model_name


class OpenAIGPT4FT_0613(OpenAILLM):
    dollar_per_ktoken = APIPricing(input=0.045, output=0.090)
    context_window: int = 8_192

    def __init__(self, model_name: str, client_setting: ClientSettings) -> None:
        super().__init__(client_setting)
        self.model = model_name