File size: 1,323 Bytes
63a5c24
 
 
5fb8127
63a5c24
 
 
 
 
 
 
 
5fb8127
 
 
 
 
63a5c24
8b0e392
5fb8127
 
63a5c24
5fb8127
8b0e392
 
 
 
 
 
 
5fb8127
 
63a5c24
 
 
5fb8127
63a5c24
 
 
 
 
 
 
 
 
 
 
5fb8127
 
 
8b0e392
5fb8127
 
8f2181a
de14443
5fb8127
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import os
from typing import List, Tuple

import gradio as gr
from openai import OpenAI



# OpenAI-compatible client pointed at a self-hosted inference server.
# Both BASE_URL and API_KEY must be present in the environment; a missing
# variable raises KeyError at import time (fail-fast by design).
_api_base = "{}/v1".format(os.environ["BASE_URL"])
client = OpenAI(
    base_url=_api_base,
    api_key=os.environ["API_KEY"],
)



def respond(
    message,
    history: List[Tuple[str, str]],
    conversational,
    max_tokens,
):
    """Produce one assistant reply for a Gradio ChatInterface turn.

    Args:
        message: The user's latest input string.
        history: Prior (user, assistant) exchange pairs from the UI.
        conversational: When truthy, prepend recent history as context;
            otherwise the model sees only the current message.
        max_tokens: Cap on the number of tokens to generate.

    Returns:
        The assistant's reply text from the first completion choice.
    """
    messages = []

    if conversational:
        # Only the two most recent exchanges are replayed — presumably a
        # deliberate context-window limit; confirm with the endpoint owner.
        for user_turn, assistant_turn in history[-2:]:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    # Deterministic decoding: temperature 0 plus beam search over 5 candidates.
    completion = client.chat.completions.create(
        model="neongeckocom/NeonLLM",
        messages=messages,
        max_tokens=max_tokens,
        temperature=0,
        extra_body={
            "repetition_penalty": 1.05,
            "use_beam_search": True,
            "best_of": 5,
        },
    )
    return completion.choices[0].message.content


# Chat UI: `respond` receives (message, history) plus the two extra widgets
# below, mapped positionally to its `conversational` and `max_tokens` params.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Checkbox(value=True, label="conversational"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    ],
    title="NeonLLM (v2024-05-15)",
    # At most 5 requests processed concurrently by this interface.
    concurrency_limit=5,
)


# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()