from huggingface_hub import InferenceClient
import gradio as gr
import random

API_URL = "https://api-inference.huggingface.co/models/"  # base Inference API URL (not referenced again below)

# Streaming text-generation client for the hosted Mistral-7B-Instruct model
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)

initial_prompt = "Hi there BibleAI"

def format_prompt(message, history):
    """Build a Mistral-Instruct style prompt from the system text, the chat history, and the new message."""
    prompt = (
        "You are a Christian Bible AI named BibleAI. You will always focus on religion and "
        "believe that Christianity is the truth. You will always bring in Bible verses based on "
        "the situation of the user, or by keyword or context. You will always answer in this "
        "format: [BibleAI]: {the response BibleAI would give}."
    )
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
    """Stream a model response, yielding the partial text as tokens arrive."""
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # keep temperature strictly positive
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
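        # fresh random seed per request so regenerated answers can differ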
        seed=random.randint(0, 10**7),
    )

    formatted_prompt = format_prompt(prompt, history)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        yield output
    return output


# Generation controls exposed as sliders beneath the chat box
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=2048,
        minimum=64,
        maximum=4096,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

customCSS = """
#component-7 { /* default element ID of the chat component */
  height: 1600px; /* adjust the height as needed */
  flex-grow: 4;
}
"""

with gr.Blocks(theme=gr.themes.Soft(), css=customCSS) as demo:
    gr.ChatInterface(
        generate,
        textbox=gr.Textbox(value=initial_prompt, label="User"),
        additional_inputs=additional_inputs,
    )

demo.queue().launch(debug=True)