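"""Gradio chat UI for a Llama-3 8B chat model served behind a cloud gateway.

Streams completions from the backend (see ``gateway.request_generation``) and
swaps the chat interface for a status dialog whenever the backend is unreachable.
"""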
import os
import gradio as gr
from typing import Iterator

from dialog import get_dialog_box
from gateway import check_server_health, request_generation

# CONSTANTS
MAX_NEW_TOKENS: int = 2048

# ENVIRONMENT VARIABLES
CLOUD_GATEWAY_API = os.getenv("API_ENDPOINT")  # base URL of the backend inference gateway


def toggle_ui():
    """
    Function to toggle the visibility of the UI based on the server health
    Returns:
        hide/show main ui/dialog
    """
    health = check_server_health(cloud_gateway_api=CLOUD_GATEWAY_API)
    if health:
        return gr.update(visible=True), gr.update(visible=False)    # Show main UI, hide dialog
    else:
        return gr.update(visible=False), gr.update(visible=True)    # Hide main UI, show dialog


def generate(
        message: str,
        chat_history: list,
        system_prompt: str,
        max_new_tokens: int = 1024,
        temperature: float = 0.6,
        top_p: float = 0.9,
        top_k: int = 50,
        repetition_penalty: float = 1.2,
) -> Iterator[str]:
    """Send a request to backend, fetch the streaming responses and emit to the UI.

    Args:
        message (str): input message from the user
        chat_history (list[tuple[str, str]]): entire chat history of the session
        system_prompt (str): system prompt
        max_new_tokens (int, optional): maximum number of tokens to generate, ignoring the number of tokens in the
                                        prompt. Defaults to 1024.
        temperature (float, optional): the value used to modulate the next token probabilities. Defaults to 0.6.
        top_p (float, optional): if set to float<1, only the smallest set of most probable tokens with probabilities
                                    that add up to top_p or higher are kept for generation. Defaults to 0.9.
        top_k (int, optional): the number of highest probability vocabulary tokens to keep for top-k-filtering.
                                Defaults to 50.
        repetition_penalty (float, optional): the parameter for repetition penalty. 1.0 means no penalty.
                                Defaults to 1.2.

    Yields:
        Iterator[str]: Streaming responses to the UI
    """
    # Accumulate streamed chunks and yield the growing response so the UI updates incrementally.
    outputs = []
    for text in request_generation(message=message,
                                   system_prompt=system_prompt,
                                   max_new_tokens=max_new_tokens,
                                   temperature=temperature,
                                   top_p=top_p,
                                   top_k=top_k,
                                   repetition_penalty=repetition_penalty,
                                   cloud_gateway_api=CLOUD_GATEWAY_API):
        outputs.append(text)
        yield "".join(outputs)


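# Chat interface wired to `generate`; the additional inputs expose the system
# prompt and the sampling parameters as extra controls in the UI.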
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max New Tokens",
            minimum=1,
            maximum=MAX_NEW_TOKENS,
            step=1,
            value=1024,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.1,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.95,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Hello there! How are you doing?"],
        ["Can you explain briefly to me what is the Python programming language?"],
        ["Explain the plot of Cinderella in a sentence."],
        ["How many hours does it take a man to eat a Helicopter?"],
        ["Write a 100-word article on 'Benefits of Open-Source in AI research'."],
    ],
    cache_examples=False,
    chatbot=gr.Chatbot(height=600),
)

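# Top-level layout: the main chat UI and the error dialog share the page, with
# only one visible at a time depending on backend health.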
with gr.Blocks(css="style.css", theme=gr.themes.Default()) as demo:
    # Check server health before displaying the UI
    visibility = check_server_health(CLOUD_GATEWAY_API)

    # Container for the main interface
    with gr.Column(visible=visibility, elem_id="main_ui") as main_ui:
        gr.Markdown(f"""
            # Llama-3 8B Chat
            This Space is an Alpha release that demonstrates model [Llama-3-8b-chat](https://huggingface.co/meta-llama/Meta-Llama-3-8B) by Meta, a Llama 3 model with 8B parameters fine-tuned for chat instructions, running on AMD MI210 infrastructure. Feel free to play with it!
            """)
        chat_interface.render()

    # Dialog box using Markdown for the error message
    with gr.Row(visible=(not visibility), elem_id="dialog_box") as dialog_box:
        # Add spinner and message
        get_dialog_box()

    # Timer to check server health every 10 seconds and update the UI accordingly
    timer = gr.Timer(value=10)
    timer.tick(fn=toggle_ui, outputs=[main_ui, dialog_box])


if __name__ == "__main__":
    # QUEUE sets the request queue size; fall back to 10 if it is unset (assumed default).
    demo.queue(max_size=int(os.getenv("QUEUE", "10"))).launch()