# Log in as a privileged user so the gated Llama-2 weights can be downloaded.
import os

from huggingface_hub import login

HF_TOKEN = os.environ.get("HF_TOKEN")
login(token=HF_TOKEN)
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

from pyreft import ReftModel
MAX_MAX_NEW_TOKENS = 2048  # hard cap for the "Max new tokens" slider
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
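# The input budget can be tuned without editing the code, e.g. by setting
# MAX_INPUT_TOKEN_LENGTH=2048 in the Space's environment variables; prompts
# longer than this are trimmed from the left in generate() below.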
DESCRIPTION = """\ | |
# Reft-Emoji-Chat | |
### What's Reft-Emoji-Chat? | |
Reft-Emoji-Chat is our emoji-chat with ReFT. It is trained with 10 training examples under 50 seconds. You can train your own ReFT agent and share it on HuggingFace by following this [tutorial](https://github.com/stanfordnlp/pyreft/tree/main/examples/gradio/train_and_share.ipynb)! | |
""" | |
LICENSE = """ | |
<p/> | |
--- | |
As a derivate work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, | |
this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md). | |
""" | |
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
if torch.cuda.is_available():
    # Llama-2 is gated on the Hub, hence the HF_TOKEN login above.
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="cuda", torch_dtype=torch.bfloat16
    )
    # Load the trained ReFT intervention and attach it to the base model.
    reft_model = ReftModel.load("pyvene/reft_emoji_chat", model, from_huggingface_hub=True)
    reft_model.set_device("cuda")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = True
prompt_no_input_template = """<s>[INST] <<SYS>> | |
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. | |
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. | |
<</SYS>> | |
%s [/INST] | |
""" | |
@spaces.GPU  # ZeroGPU: allocate a GPU for the duration of each call.
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    max_new_tokens: int = 1024,
) -> Iterator[str]:
    # Tokenize and prepare the input. chat_history is accepted to satisfy the
    # gr.ChatInterface signature but is not folded into the prompt.
    prompt = prompt_no_input_template % message
    prompt = tokenizer(prompt, return_tensors="pt").to(model.device)
    input_ids = prompt["input_ids"]
    attention_mask = prompt["attention_mask"]
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        # Keep only the most recent tokens if the prompt is too long.
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        attention_mask = attention_mask[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    # Intervene on the last prompt position, matching how the model was trained.
    base_unit_location = input_ids.shape[-1] - 1
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = {
        "base": {"input_ids": input_ids, "attention_mask": attention_mask},
        # pyvene's nested location format: no source representations (None),
        # and one intervention location (the last prompt token) on the base.
        "unit_locations": {"sources->base": (None, [[[base_unit_location]]])},
        "max_new_tokens": max_new_tokens,
        "intervene_on_prompt": True,
        "streamer": streamer,
        "eos_token_id": tokenizer.eos_token_id,
        "early_stopping": True,
        "do_sample": False,  # greedy decoding for reproducible outputs
    }
    # Run generation on a background thread and stream partial text as it arrives.
    t = Thread(target=reft_model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        )
    ],
    stop_btn=None,
    examples=[
        ["What's 2+2?"],
        ["Why is the sky blue?"],
        ["What's Apple's stock price?"],
        ["Plan a family road trip to Austin"],
    ],
)
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
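# To try this locally (assumptions: a CUDA GPU, the pyreft/gradio/transformers
# stack installed, an HF token with access to the gated Llama-2 weights, and
# this file saved under the conventional Spaces entry-point name app.py):
#
#     HF_TOKEN=hf_... python app.py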