import gradio as gr

from typing import Any, Dict, Generator, List

import openai  # explicit import for the legacy _generate_openai helper below

# from huggingface_hub import InferenceClient
# from transformers import AutoTokenizer
from jinja2 import Environment, FileSystemLoader

from settings import *
from gradio_app.backend.ChatGptInteractor import *


# tokenizer = AutoTokenizer.from_pretrained(LLM_NAME)
# HF_TOKEN = None
# hf_client = InferenceClient(LLM_NAME, token=HF_TOKEN)


def format_prompt(message: str, api_kind: str):
    """
    Formats the given message using a chat template.

    Args:
        message (str): The user message to be formatted.

    Returns:
        str: Formatted message after applying the chat template.
    """

    # Create a list of message dictionaries with role and content
    messages: List[Dict[str, Any]] = [{'role': 'user', 'content': message}]

    if api_kind == "openai":
        return messages
    elif api_kind == "hf":
        return tokenizer.apply_chat_template(messages, tokenize=False)
    else:
        raise ValueError("API is not supported")


def generate_hf(prompt: str, history: str, temperature: float = 0.9, max_new_tokens: int = 512,
                top_p: float = 0.6, repetition_penalty: float = 1.2) -> Generator[str, None, str]:
    """
    Generate a sequence of tokens based on a given prompt and history using Mistral client.

    Args:
        prompt (str): The initial prompt for the text generation.
        history (str): Context or history for the text generation.
        temperature (float, optional): The softmax temperature for sampling. Defaults to 0.9.
        max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 256.
        top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
        repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.

    Returns:
        Generator[str, None, str]: A generator yielding chunks of generated text.
                                   Returns a final string if an error occurs.
    """

    temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
    top_p = float(top_p)

    generate_kwargs = {
        'temperature': temperature,
        'max_new_tokens': max_new_tokens,
        'top_p': top_p,
        'repetition_penalty': repetition_penalty,
        'do_sample': True,
        'seed': 42,
    }

    formatted_prompt = format_prompt(prompt, "hf")

    try:
        stream = hf_client.text_generation(formatted_prompt, **generate_kwargs,
                                           stream=True, details=True, return_full_text=False)
        output = ""
        for response in stream:
            output += response.token.text
            yield output

    except Exception as e:
        if "Too Many Requests" in str(e):
            print("ERROR: Too many requests on Mistral client")
            gr.Warning("Unfortunately Mistral is unable to process")
            return "Unfortunately, I am not able to process your request now."
        elif "Authorization header is invalid" in str(e):
            print("Authetification error:", str(e))
            gr.Warning("Authentication error: HF token was either not provided or incorrect")
            return "Authentication error"
        else:
            print("Unhandled Exception:", str(e))
            gr.Warning("Unfortunately Mistral is unable to process")
            return "I do not know what happened, but I couldn't understand you."


env = Environment(loader=FileSystemLoader('gradio_app/templates'))
context_template = env.get_template('context_template.j2')
start_system_message = context_template.render(documents=[])


def construct_openai_messages(context, history):
    """
    Build the OpenAI-style message list from the retrieved context and the chat history.

    The rendered system template opens the conversation, and the retrieved context is
    injected as an additional system message immediately before the latest, still
    unanswered user question.
    """
    messages = [
        {
            "role": "system",
            "content": start_system_message,
        },
    ]
    for q, a in history:
        if len(a) == 0:  # the last message
            messages.append({
                "role": "system",
                "content": context,
            })
        messages.append({
            "role": "user",
            "content": q,
        })
        if len(a) != 0:  # some of the previous LLM answers
            messages.append({
                "role": "assistant",
                "content": a,
            })
    return messages
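
# Illustrative only: with history = [("Q1", "A1"), ("Q2", "")] and retrieved context C,
# construct_openai_messages(C, history) produces
# [system(start_system_message), user("Q1"), assistant("A1"), system(C), user("Q2")],
# i.e. the context is injected right before the question currently being answered.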


def generate_openai(messages):
    """Stream the assistant reply for the given message list via ChatGptInteractor."""
    cgi = ChatGptInteractor(model_name=LLM_NAME)
    for part in cgi.chat_completion(messages, max_tokens=512, temperature=0, stream=True):
        yield cgi.get_stream_text(part)


def _generate_openai(prompt: str, history: str, temperature: float = 0.9, max_new_tokens: int = 512,
                    top_p: float = 0.6, repetition_penalty: float = 1.2) -> Generator[str, None, str]:
    """
    Generate a sequence of tokens based on a given prompt and history using Mistral client.

    Args:
        prompt (str): The initial prompt for the text generation.
        history (str): Context or history for the text generation.
        temperature (float, optional): The softmax temperature for sampling. Defaults to 0.9.
        max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 256.
        top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
        repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.

    Returns:
        Generator[str, None, str]: A generator yielding chunks of generated text.
                                   Returns a final string if an error occurs.
    """

    temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
    top_p = float(top_p)

    generate_kwargs = {
        'temperature': temperature,
        'max_tokens': max_new_tokens,
        'top_p': top_p,
        'frequency_penalty': max(-2., min(repetition_penalty, 2.)),
    }

    formatted_prompt = format_prompt(prompt, "openai")

    try:
        stream = openai.ChatCompletion.create(
            model=LLM_NAME,
            messages=formatted_prompt,
            **generate_kwargs,
            stream=True
        )
        output = ""
        for chunk in stream:
            output += chunk.choices[0].delta.get("content", "")
            yield output

    except Exception as e:
        if "Too Many Requests" in str(e):
            print("ERROR: Too many requests on OpenAI client")
            gr.Warning("Unfortunately OpenAI is unable to process")
            return "Unfortunately, I am not able to process your request now."
        elif "You didn't provide an API key" in str(e):
            print("Authetification error:", str(e))
            gr.Warning("Authentication error: OpenAI key was either not provided or incorrect")
            return "Authentication error"
        else:
            print("Unhandled Exception:", str(e))
            gr.Warning("Unfortunately OpenAI is unable to process")
            return "I do not know what happened, but I couldn't understand you."