from typing import List, Union

from litellm.types.llms.openai import AllMessageValues, OpenAITextCompletionUserMessage

from ...base_llm.completion.transformation import BaseTextCompletionConfig
from ...openai.completion.utils import _transform_prompt
from ..common_utils import FireworksAIMixin


class FireworksAITextCompletionConfig(FireworksAIMixin, BaseTextCompletionConfig):
    def get_supported_openai_params(self, model: str) -> list:
        """
        See how LiteLLM supports Provider-specific parameters - https://docs.litellm.ai/docs/completion/provider_specific_params#proxy-usage
        """
        return [
            "max_tokens",
            "logprobs",
            "echo",
            "temperature",
            "top_p",
            "top_k",
            "frequency_penalty",
            "presence_penalty",
            "n",
            "stop",
            "response_format",
            "stream",
            "user",
        ]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
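        """
        Copy each supported OpenAI param through to the request unchanged;
        unsupported params are simply not copied over.
        """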
        supported_params = self.get_supported_openai_params(model)
        for k, v in non_default_params.items():
            if k in supported_params:
                optional_params[k] = v
        return optional_params

    def transform_text_completion_request(
        self,
        model: str,
        messages: Union[List[AllMessageValues], List[OpenAITextCompletionUserMessage]],
        optional_params: dict,
        headers: dict,
    ) -> dict:
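        """
        Build the Fireworks AI text-completion request body: flatten the
        messages into a prompt and expand a bare model name to the full
        "accounts/fireworks/models/{model}" path.
        """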
        prompt = _transform_prompt(messages=messages)

        if not model.startswith("accounts/"):
            model = f"accounts/fireworks/models/{model}"

        data = {
            "model": model,
            "prompt": prompt,
            **optional_params,
        }
        return data
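

# Minimal usage sketch (illustrative only; the model name and parameter
# values are arbitrary, and this module's relative imports mean it cannot
# run as a standalone script):
#
#     config = FireworksAITextCompletionConfig()
#
#     # Keep only the OpenAI params Fireworks AI supports.
#     optional_params = config.map_openai_params(
#         non_default_params={"temperature": 0.7, "max_tokens": 256},
#         optional_params={},
#         model="llama-v3p1-8b-instruct",
#         drop_params=False,
#     )
#
#     # Build the request body; the bare model name is expanded to
#     # "accounts/fireworks/models/llama-v3p1-8b-instruct".
#     request_body = config.transform_text_completion_request(
#         model="llama-v3p1-8b-instruct",
#         messages=[{"role": "user", "content": "Say hello"}],
#         optional_params=optional_params,
#         headers={},
#     )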