import os
import pickle as pkl
from pathlib import Path
from threading import Thread
from typing import List, Tuple, Iterator, Optional
from queue import Queue

import spaces
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# TODO Sentence-level highlighting instead (prediction after every word is not what the probes were trained on). This would also fix token-level highlighting issues.
# TODO Scale the highlighting by output log probs instead?
# TODO Make it look nicer.
# TODO Better examples.
# TODO Streaming output (needs a custom generation function because of the probes).
# TODO Add options to switch between models, SLT/TBG, and layers?
# TODO Full semantic entropy calculation.

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
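
# Example: the input-length cap can be overridden at deploy time via an environment
# variable (assuming this file is run as the Space's app.py):
#   MAX_INPUT_TOKEN_LENGTH=2048 python app.py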
DESCRIPTION = """
<h1>Llama-2 7B Chat with Uncertainty Probes</h1>
<p>This Space demonstrates the Llama-2-7b-chat model augmented with uncertainty probes.</p>
<p>This demo is based on our paper: <a href="https://arxiv.org/abs/2406.15927" target="_blank">"Semantic Entropy Probes: Robust and Cheap Hallucination Detection in LLMs"</a> by Jannik Kossen*, Jiatong Han*, Muhammed Razzak*, Lisa Schut, Shreshth Malik and Yarin Gal.</p>
<p>The highlighted text shows the model's uncertainty in real time:</p>
<ul>
<li><span style="background-color: #00FF00; color: black">Green</span> indicates more certain generations</li>
<li><span style="background-color: #FF0000; color: black">Red</span> indicates more uncertain generations</li>
</ul>
<p>The demo reports the model's uncertainty as estimated by two different probes:</p>
<ul>
<li><b>Semantic Uncertainty Probe:</b> predicts the semantic uncertainty of the model's generations.</li>
<li><b>Accuracy Probe:</b> predicts the accuracy of the model's generations.</li>
</ul>
<p>Please see our paper for more details.</p>
"""
EXAMPLES = [
    ["What is the capital of France?", ""],
    ["Who landed on the moon?", ""],
    ["Who is Yarin Gal?", ""],
    ["Explain the theory of relativity in simple terms.", ""],
]
if torch.cuda.is_available():
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    # TODO load the full model not the 8bit one?
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False

    # load the probe data
    with open("./model/20240625-131035_demo.pkl", "rb") as f:
        probe_data = pkl.load(f)
    # take the NQ Open one
    probe_data = probe_data[-2]
    se_probe = probe_data['t_bmodel']
    se_layer_range = probe_data['sep_layer_range']
    acc_probe = probe_data['t_amodel']
    acc_layer_range = probe_data['ap_layer_range']
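    # Sanity check (the probes are assumed to be scikit-learn-style binary classifiers;
    # the stored layer ranges select which hidden layers get concatenated into a probe's input).
    for probe in (se_probe, acc_probe):
        assert hasattr(probe, "predict_proba"), "probe is expected to expose predict_proba()"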
else:
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
@spaces.GPU  # assumed ZeroGPU setup (given `import spaces` above); a no-op when run elsewhere
def generate(
    message: str,
    system_prompt: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Tuple[str, str]:
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    # Generate without threading/streaming: the probes need the hidden states of the whole generation.
    generation_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        output_hidden_states=True,
        return_dict_in_generate=True,
    )
    with torch.no_grad():
        outputs = model.generate(**generation_kwargs)

    generated_tokens = outputs.sequences[0, input_ids.shape[1]:]
    generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    print(generated_text)

    # outputs.hidden_states is a tuple with one entry per generation step; each entry is a
    # tuple over layers of tensors with shape (batch_size, step_seq_len, hidden_size).
    hidden = outputs.hidden_states

    se_highlighted_text = ""
    acc_highlighted_text = ""

    # Skip the first step, whose hidden states belong to the prompt.
    for i in range(1, len(hidden)):
        # Stack this step's hidden state across all layers: (num_layers + 1, hidden_size).
        token_embeddings = torch.stack([layer_states[0, 0, :].cpu() for layer_states in hidden[i]]).numpy()

        # Semantic Uncertainty Probe: probability of the high-uncertainty class, rescaled to [-1, 1]
        # so that positive values mean more uncertain (red).
        se_concat_layers = token_embeddings[se_layer_range[0]:se_layer_range[1]].reshape(-1)
        se_probe_pred = se_probe.predict_proba(se_concat_layers.reshape(1, -1))[0][1] * 2 - 1

        # Accuracy Probe: invert the predicted probability of being accurate, rescaled to [-1, 1]
        # so that, as above, positive values mean more uncertain (red).
        acc_concat_layers = token_embeddings[acc_layer_range[0]:acc_layer_range[1]].reshape(-1)
        acc_probe_pred = (1 - acc_probe.predict_proba(acc_concat_layers.reshape(1, -1))[0][1]) * 2 - 1

        output_id = outputs.sequences[0, input_ids.shape[1] + i]
        output_word = tokenizer.decode(output_id)
        print(output_id, output_word, se_probe_pred, acc_probe_pred)

        se_highlighted_text += f" {highlight_text(output_word, se_probe_pred)}"
        acc_highlighted_text += f" {highlight_text(output_word, acc_probe_pred)}"

    return se_highlighted_text, acc_highlighted_text
def highlight_text(text: str, uncertainty_score: float) -> str:
    """Wrap text in an HTML span shaded red for positive (uncertain) scores and green for negative (certain) ones."""
    if uncertainty_score > 0:
        html_color = "#%02X%02X%02X" % (
            255,
            int(255 * (1 - uncertainty_score)),
            int(255 * (1 - uncertainty_score)),
        )
    else:
        html_color = "#%02X%02X%02X" % (
            int(255 * (1 + uncertainty_score)),
            255,
            int(255 * (1 + uncertainty_score)),
        )
    return '<span style="background-color: {}; color: black">{}</span>'.format(
        html_color, text
    )
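
# Illustrative colour mapping for highlight_text (assumed example values, not from a real run):
#   highlight_text("Paris", -0.9) -> background #19FF19 (strong green, confident)
#   highlight_text("Paris",  0.0) -> background #FFFFFF (white, neutral)
#   highlight_text("Paris",  0.9) -> background #FF1919 (strong red, uncertain)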
with gr.Blocks(title="Llama-2 7B Chat with Dual Probes", css="footer {visibility: hidden}") as demo:
    gr.HTML(DESCRIPTION)

    with gr.Row():
        with gr.Column():
            message = gr.Textbox(label="Message")
            system_prompt = gr.Textbox(label="System prompt", lines=2)
        with gr.Column():
            max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
            temperature = gr.Slider(label="Temperature", minimum=0.01, maximum=2.0, step=0.1, value=0.01)
            top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
            top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
            repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)

    with gr.Row():
        generate_btn = gr.Button("Generate")

    # Add spacing between the controls and the probe outputs
    gr.HTML("<br><br>")

    with gr.Row():
        with gr.Column():
            gr.HTML("<h2>Semantic Uncertainty Probe</h2>")
            se_output = gr.HTML(label="Semantic Uncertainty Probe")
        # Add spacing between columns
        gr.HTML("<div style='width: 20px;'></div>")
        with gr.Column():
            gr.HTML("<h2>Accuracy Probe</h2>")
            acc_output = gr.HTML(label="Accuracy Probe")

    gr.Examples(
        examples=EXAMPLES,
        inputs=[message, system_prompt],
        outputs=[se_output, acc_output],
        fn=generate,
    )

    generate_btn.click(
        generate,
        inputs=[message, system_prompt, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[se_output, acc_output],
    )
if __name__ == "__main__":
    demo.launch()