from brand_tune import prompting

import gradio as gr
import openai
import hashlib


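# Password gate: messages are only forwarded to OpenAI after the user has sent
# the correct password. Only a salted SHA-256 digest of the password is stored
# here; to change the password, paste the output of hash("<new password>")
# into CORRECT_HASH.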
SALT = "this is some random text 4e881d478cbeb1f6e5cb416770"
CORRECT_HASH = "222632858bf11308f0a7d41cb9fc061a51a47c98da85699706cc663b24aed55e"


def hash(password: str) -> str:
    return hashlib.sha256((password + SALT).encode()).hexdigest()


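# Convert Gradio's [[user, assistant], ...] chat history into the OpenAI
# chat-completion message format, prepending the system prompt and, if
# enabled, a single few-shot example from brand_tune.prompting.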
def gradio_history_to_openai_history(gradio_history: list[list[str]]):
    openai_history = [
        {
            "role": "system",
            "content": prompting.PROMPT_TEMPLATE,
        },
    ]
    if prompting.USE_EXAMPLE:
        openai_history += [
            {"role": "user", "content": prompting.EXAMPLE_INPUT},
            {"role": "assistant", "content": prompting.EXAMPLE_OUTPUT},
        ]

    for gradio_message in gradio_history:
        openai_history.append({"role": "user", "content": gradio_message[0]})
        if gradio_message[1]:
            openai_history.append({"role": "assistant", "content": gradio_message[1]})

    return openai_history


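# Chat callback: a generator that first enforces the password gate, then
# streams the assistant's reply into the last history entry.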
def bot(history: list[list[str]]):
    if hash(history[-1][0]) == CORRECT_HASH:
        history[-1][1] = "Correct! You can now use the chatbot."
        yield history
        return
    else:
        found = False
        for user_message, bot_message in history:
            if hash(user_message) == CORRECT_HASH:
                found = True
                break
        if not found:
            history[-1][1] = "Incorrect password. Try again."
            yield history
            return

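    # The password was found earlier in the history, so forward the whole
    # conversation to OpenAI and stream the reply.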
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=gradio_history_to_openai_history(history),
            stream=True,
        )
    except Exception as e:
        # An openai.error.RateLimitError is the most likely failure here,
        # but we catch any exception just in case
        history[-1][1] = f"[ERROR] {type(e)}: {e}"
        yield history
        return

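    # Stream the response: append each content delta to the last history
    # entry and yield so the Gradio UI updates live.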
    history[-1][1] = ""
    for chunk in response:
        choice = chunk.choices[0]
        if choice.finish_reason is not None:
            break

        # The first chunk just says that the role is "assistant"
        # and doesn't have any content (text)
        if hasattr(choice.delta, "content"):
            history[-1][1] += choice.delta.content
            # print(choice.delta.content)

        yield history


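# UI: a chat window, a message box, and Interrupt/Clear buttons.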
with gr.Blocks() as interface:
    chatbot = gr.Chatbot(label="Brand Sheriff")
    msg = gr.Textbox(
        show_label=False,
        placeholder="Ex: caption a tiktok post about a weird ai-generated image",
    )
    with gr.Row():
        interrupt = gr.Button("Interrupt")
        clear = gr.Button("Clear")

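    # Append the new user message to the history with a placeholder reply and
    # clear the textbox; bot() fills the placeholder in afterwards.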
    def user(user_message, history):
        return "", history + [[user_message, None]]

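    # Submitting the textbox runs user() immediately (queue=False), then
    # bot() to stream the assistant's reply.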
    submit_event = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
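    # Interrupt cancels the in-flight bot() generator; Clear resets the chat.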
    interrupt.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])
    clear.click(lambda: None, None, chatbot, queue=False)


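# Queuing must be enabled for generator callbacks like bot() to stream output.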
interface.queue()