import spaces
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
from ui import css, PLACEHOLDER

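# Download the GGUF weights for each selectable model into ./models at startup.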
hf_hub_download(repo_id="bartowski/dolphin-2.9.1-yi-1.5-34b-GGUF", filename="dolphin-2.9.1-yi-1.5-34b-Q6_K.gguf",  local_dir = "./models")
hf_hub_download(repo_id="crusoeai/dolphin-2.9.1-llama-3-70b-GGUF", filename="dolphin-2.9.1-llama-3-70b.Q3_K_M.gguf",  local_dir = "./models")
hf_hub_download(repo_id="kroonen/dolphin-2.9.2-Phi-3-Medium-GGUF", filename="dolphin-2.9.2-Phi-3-Medium-Q6_K.gguf",  local_dir = "./models")
# hf_hub_download(repo_id="crusoeai/dolphin-2.9.1-llama-3-8b-GGUF", filename="dolphin-2.9.1-llama-3-8b.Q6_K.gguf",  local_dir = "./models")

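# Runs on Hugging Face ZeroGPU: each call may hold a GPU for up to 120 seconds.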
@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
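    # Load the GGUF model selected in the UI. Note: the model is (re)loaded on
    # every request; caching the Llama instance per model name would avoid the
    # repeated load time.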
    llm = Llama(
        model_path=f"models/{model}",
        flash_attn=True,
        n_gpu_layers=81,
        n_batch=1024,
        n_ctx=8192,
    )
    provider = LlamaCppPythonProvider(llm)

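    # Dolphin models are fine-tuned on the ChatML prompt format, hence the
    # CHATML messages formatter.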
    agent = LlamaCppAgent(
        provider,
        system_prompt="You are Dolphin an AI assistant that helps humanity.",
        predefined_messages_formatter_type=MessagesFormatterType.CHATML,
        debug_output=True
    )
    
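    # Start from the provider's default sampling settings, then apply the
    # values chosen in the UI.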
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

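    # Replay Gradio's (user, assistant) history tuples into the agent's chat
    # history so the model sees the full conversation.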
    messages = BasicChatHistory()

    for user_msg, assistant_msg in history:
        messages.add_message({
            'role': Roles.user,
            'content': user_msg
        })
        messages.add_message({
            'role': Roles.assistant,
            'content': assistant_msg
        })
    
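    # Stream the response and yield the accumulated text so the Gradio chat
    # window updates token by token.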
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False,
    )
    
    outputs = ""
    for output in stream:
        outputs += output
        yield outputs

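# Gradio UI: the additional inputs are passed to respond() positionally after
# (message, history), in the order they are declared here.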
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(
            [
                'dolphin-2.9.1-yi-1.5-34b-Q6_K.gguf',
                'dolphin-2.9.1-llama-3-70b.Q3_K_M.gguf',
                'dolphin-2.9.2-Phi-3-Medium-Q6_K.gguf',
            ],
            value="dolphin-2.9.1-llama-3-70b.Q3_K_M.gguf",
            label="Model",
        ),
        gr.Slider(minimum=1, maximum=8192, value=8192, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        ),
        gr.Slider(
            minimum=0,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
        ),
        gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=1.1,
            step=0.1,
            label="Repetition penalty",
        ),
    ],
    theme=gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="blue",
        neutral_hue="gray",
        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"],
    ).set(
        body_background_fill_dark="#0f172a",
        block_background_fill_dark="#0f172a",
        block_border_width="1px",
        block_title_background_fill_dark="#070d1b",
        input_background_fill_dark="#0c1425",
        button_secondary_background_fill_dark="#070d1b",
        border_color_primary_dark="#21293b",
        background_fill_secondary_dark="#0f172a",
        color_accent_soft_dark="transparent"
    ),
    css=css,
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    description="Cognitive Computation: Chat Dolphin 🐬",
    chatbot=gr.Chatbot(
        scale=1,
        placeholder=PLACEHOLDER,
        show_copy_button=True
    )
)
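
# Local smoke test without the UI (hypothetical values; assumes the models
# above have been downloaded and a GPU is available):
#     for text in respond("Hello!", [], "dolphin-2.9.2-Phi-3-Medium-Q6_K.gguf",
#                         256, 0.7, 0.95, 40, 1.1):
#         print(text)  # accumulated response so far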

if __name__ == "__main__":
    demo.launch()