ZEUS-8B-V2-CHAT / app.py
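
"""Gradio chat Space for ZEUS-8B-V2, served with llama-cpp-python.

Streams ChatML chat completions from a Q4_K_M GGUF build of the model and
restarts the Space on a 12-hour timer.
"""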

import threading
import time

import gradio as gr
from huggingface_hub import HfApi
from llama_cpp import Llama

API = HfApi()
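
# Download the Q4_K_M quantized GGUF weights from the Hub and load them for
# ChatML-style chat completion.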
LLM = Llama.from_pretrained(
    repo_id="mradermacher/ZEUS-8B-V2-i1-GGUF",
    filename="*Q4_K_M.gguf",
    chat_format="chatml",
    verbose=False,
)
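

# Self-restart watchdog: sleep for the given interval, then reboot this Space
# through the Hub API; on any failure, retry every 10 minutes.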
def refresh(how_much=43200):  # default to 12 hours
    time.sleep(how_much)
    try:
        API.restart_space(repo_id="T145/ZEUS-8B-V2-CHAT")
    except Exception as e:
        print(f"Error while rebooting, trying again... {e}")
        refresh(600)  # retry in 10 minutes if an error happens
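

# Chat handler: rebuild the full message list each turn and stream the reply
# back to the UI token by token.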
def respond(
    message,
    history: list[dict],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]

    # With type="messages", Gradio supplies history as OpenAI-style
    # {"role": ..., "content": ...} dicts rather than (user, assistant)
    # tuples, so each non-empty entry can be forwarded directly.
    for msg in history:
        if msg.get("content"):
            messages.append({"role": msg["role"], "content": msg["content"]})

    messages.append({"role": "user", "content": message})
response = ""
for message in LLM.create_chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
if "choices" not in message:
continue
token = message["choices"][0]["delta"]
if "content" not in token:
continue
token = token["content"]
if token.strip() == "|":
break
response += token
yield response
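

# Chat UI: type="messages" hands history to respond() as role/content dicts;
# the extra inputs surface the system prompt and sampling parameters.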
if __name__ == "__main__":
    demo = gr.ChatInterface(
        fn=respond,
        type="messages",
        additional_inputs=[
            gr.Textbox(value="You are a friendly assistant.", label="System message"),
            gr.Slider(minimum=100, maximum=2048, value=1024, step=2, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Top-p (nucleus sampling)",
            ),
        ],
    )
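
    # Run the self-restart timer in the background; daemon=True keeps the
    # sleeping thread from blocking interpreter shutdown.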
    threading.Thread(target=refresh, daemon=True).start()
    demo.launch()