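"""Integration tests for the OpenLLM large language model provider.

These tests expect a running OpenLLM server; point the OPENLLM_SERVER_URL
environment variable at its base URL before running them.
"""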
import os
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.llm.llm import OpenLLMLargeLanguageModel


def test_validate_credentials_for_chat_model():
    model = OpenLLMLargeLanguageModel()

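    # an invalid server_url should fail credential validation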
    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='NOT IMPORTANT',
            credentials={
                'server_url': 'invalid_key',
            }
        )

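    # a reachable server URL taken from the environment should validate successfully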
    model.validate_credentials(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        }
    )


def test_invoke_model():
    model = OpenLLMLargeLanguageModel()

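    # non-streaming invocation (stream=False) should return a single LLMResult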
    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        user="abc-123",
        stream=False
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
    assert response.usage.total_tokens > 0


def test_invoke_stream_model():
    model = OpenLLMLargeLanguageModel()

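    # streaming invocation (stream=True) should return a generator of chunks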
    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        stream=True,
        user="abc-123"
    )

    assert isinstance(response, Generator)
    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        # content may be empty only on the final chunk that carries a finish_reason
        if chunk.delta.finish_reason is None:
            assert len(chunk.delta.message.content) > 0


def test_get_num_tokens():
    model = OpenLLMLargeLanguageModel()

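    # the 'Hello World!' prompt is expected to tokenize to exactly 3 tokens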
    response = model.get_num_tokens(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        tools=[]
    )

    assert isinstance(response, int)
    assert response == 3