# from pathlib import Path
# from typing import List, Optional, Tuple
# import spaces
# import gradio as gr
# import numpy as np
# import torch
# from sudachipy import dictionary
# from sudachipy import tokenizer as sudachi_tokenizer
# from transformers import AutoModelForCausalLM, PreTrainedTokenizer, T5Tokenizer
# model_dir = Path(__file__).parents[0] / "model"
# device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# tokenizer = T5Tokenizer.from_pretrained(model_dir)
# tokenizer.do_lower_case = True
# trained_model = AutoModelForCausalLM.from_pretrained(model_dir)
# trained_model.to(device)
# # baseline model
# baseline_model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium")
# baseline_model.to(device)
# sudachi_tokenizer_obj = dictionary.Dictionary().create()
# mode = sudachi_tokenizer.Tokenizer.SplitMode.C
# def sudachi_tokenize(input_text: str) -> List[str]:
#     morphemes = sudachi_tokenizer_obj.tokenize(input_text, mode)
#     return [morpheme.surface() for morpheme in morphemes]
# def calc_offsets(tokens: List[str]) -> List[int]:
#     offsets = [0]
#     for token in tokens:
#         offsets.append(offsets[-1] + len(token))
#     return offsets
# def distribute_surprisals_to_characters(
#     tokens2surprisal: List[Tuple[str, float]]
# ) -> List[Tuple[str, float]]:
#     tokens2surprisal_by_character: List[Tuple[str, float]] = []
#     for token, surprisal in tokens2surprisal:
#         token_len = len(token)
#         for character in token:
#             tokens2surprisal_by_character.append((character, surprisal / token_len))
#     return tokens2surprisal_by_character
# def calculate_surprisals_by_character(
#     input_text: str, model: AutoModelForCausalLM, tokenizer: PreTrainedTokenizer
# ) -> Tuple[float, List[Tuple[str, float]]]:
#     input_tokens = [
#         token.replace("▁", "")
#         for token in tokenizer.tokenize(input_text)
#         if token != "▁"
#     ]
#     input_ids = tokenizer.encode(
#         "<s>" + input_text, add_special_tokens=False, return_tensors="pt"
#     ).to(device)
#     logits = model(input_ids)["logits"].squeeze(0)
#     surprisals = []
#     for i in range(logits.shape[0] - 1):
#         if input_ids[0][i + 1] == 9:
#             continue
#         logit = logits[i]
#         prob = torch.softmax(logit, dim=0)
#         neg_logprob = -torch.log(prob)
#         surprisals.append(neg_logprob[input_ids[0][i + 1]].item())
#     mean_surprisal = np.mean(surprisals)
#     tokens2surprisal: List[Tuple[str, float]] = []
#     for token, surprisal in zip(input_tokens, surprisals):
#         tokens2surprisal.append((token, surprisal))
#     char2surprisal = distribute_surprisals_to_characters(tokens2surprisal)
#     return mean_surprisal, char2surprisal
# def aggregate_surprisals_by_offset(
#     char2surprisal: List[Tuple[str, float]], offsets: List[int]
# ) -> List[Tuple[str, float]]:
#     tokens2surprisal = []
#     for i in range(len(offsets) - 1):
#         start = offsets[i]
#         end = offsets[i + 1]
#         surprisal = sum([surprisal for _, surprisal in char2surprisal[start:end]])
#         token = "".join([char for char, _ in char2surprisal[start:end]])
#         tokens2surprisal.append((token, surprisal))
#     return tokens2surprisal
# def highlight_token(token: str, score: float):
#     if score > 0:
#         html_color = "#%02X%02X%02X" % (
#             255,
#             int(255 * (1 - score)),
#             int(255 * (1 - score)),
#         )
#     else:
#         html_color = "#%02X%02X%02X" % (
#             int(255 * (1 + score)),
#             255,
#             int(255 * (1 + score)),
#         )
#     return '<span style="background-color: {}; color: black">{}</span>'.format(
#         html_color, token
#     )
# def create_highlighted_text(
#     label: str,
#     tokens2scores: List[Tuple[str, float]],
#     mean_surprisal: Optional[float] = None,
# ):
#     if mean_surprisal is None:
#         highlighted_text = "<h2><b>" + label + "</b></h2>"
#     else:
#         highlighted_text = (
#             "<h2><b>" + label + f"</b>(サプライザル平均値: {mean_surprisal:.3f})</h2>"
#         )
#     for token, score in tokens2scores:
#         highlighted_text += highlight_token(token, score)
#     return highlighted_text
# def normalize_surprisals(
#     tokens2surprisal: List[Tuple[str, float]], log_scale: bool = False
# ) -> List[Tuple[str, float]]:
#     if log_scale:
#         surprisals = [np.log(surprisal) for _, surprisal in tokens2surprisal]
#     else:
#         surprisals = [surprisal for _, surprisal in tokens2surprisal]
#     min_surprisal = np.min(surprisals)
#     max_surprisal = np.max(surprisals)
#     surprisals = [
#         (surprisal - min_surprisal) / (max_surprisal - min_surprisal)
#         for surprisal in surprisals
#     ]
#     assert min(surprisals) >= 0
#     assert max(surprisals) <= 1
#     return [
#         (token, surprisal)
#         for (token, _), surprisal in zip(tokens2surprisal, surprisals)
#     ]
# def calculate_surprisal_diff(
#     tokens2surprisal: List[Tuple[str, float]],
#     baseline_tokens2surprisal: List[Tuple[str, float]],
#     scale: float = 100.0,
# ):
#     diff_tokens2surprisal = [
#         (token, (surprisal - baseline_surprisal) * scale)
#         for (token, surprisal), (_, baseline_surprisal) in zip(
#             tokens2surprisal, baseline_tokens2surprisal
#         )
#     ]
#     return diff_tokens2surprisal
# @spaces.GPU
# def main(input_text: str) -> Tuple[str, str, str]:
#     mean_surprisal, char2surprisal = calculate_surprisals_by_character(
#         input_text, trained_model, tokenizer
#     )
#     offsets = calc_offsets(sudachi_tokenize(input_text))
#     tokens2surprisal = aggregate_surprisals_by_offset(char2surprisal, offsets)
#     tokens2surprisal = normalize_surprisals(tokens2surprisal)
#     highlighted_text = create_highlighted_text(
#         "学習後モデル", tokens2surprisal, mean_surprisal
#     )
#     (
#         baseline_mean_surprisal,
#         baseline_char2surprisal,
#     ) = calculate_surprisals_by_character(input_text, baseline_model, tokenizer)
#     baseline_tokens2surprisal = aggregate_surprisals_by_offset(
#         baseline_char2surprisal, offsets
#     )
#     baseline_tokens2surprisal = normalize_surprisals(baseline_tokens2surprisal)
#     baseline_highlighted_text = create_highlighted_text(
#         "学習前モデル", baseline_tokens2surprisal, baseline_mean_surprisal
#     )
#     diff_tokens2surprisal = calculate_surprisal_diff(
#         tokens2surprisal, baseline_tokens2surprisal, 100.0
#     )
#     diff_highlighted_text = create_highlighted_text(
#         "学習前後の差分", diff_tokens2surprisal, None
#     )
#     return (
#         baseline_highlighted_text,
#         highlighted_text,
#         diff_highlighted_text,
#     )
# if __name__ == "__main__":
#     demo = gr.Interface(
#         fn=main,
#         title="文章の読みやすさを自動評価するAI",
#         description="文章を入力すると、読みづらい表現は赤く、読みやすい表現は青くハイライトされて出力されます。",
#         # show_label=True,
#         inputs=gr.Textbox(
#             lines=5,
#             label="文章",
#             placeholder="ここに文章を入力してください。",
#         ),
#         outputs=[
#             gr.HTML(label="学習前モデル", show_label=True),
#             gr.HTML(label="学習後モデル", show_label=True),
#             gr.HTML(label="学習前後の差分", show_label=True),
#         ],
#         examples=[
#             "太郎が二郎を殴った。",
#             "太郎が二郎に殴った。",
#             "サイエンスインパクトラボは、国立研究開発法人科学技術振興機構(JST)の「科学と社会」推進部が行う共創プログラムです。「先端の研究開発を行う研究者」と「社会課題解決に取り組むプレイヤー」が約3ヶ月に渡って共創活動を行います。",
#             "近年、ニューラル言語モデルが自然言語の統語知識をどれほど有しているかを、容認性判断課題を通して検証する研究が行われてきている。しかし、このような言語モデルの統語的評価を行うためのデータセットは、主に英語を中心とした欧米の諸言語を対象に構築されてきた。本研究では、既存のデータセットの問題点を克服しつつ、このようなデータセットが構築されてこなかった日本語を対象とした初めてのデータセットである JCoLA (JapaneseCorpus of Linguistic Acceptability) を構築した上で、それを用いた言語モデルの統語的評価を行った。",
#         ],
#     )
#     demo.launch()
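
# The commented-out block above is an earlier, unused Gradio demo (per-token surprisal
# highlighting for Japanese text with rinna/japanese-gpt2-medium). The active app below is
# the Llama-2 chat demo with a streamable semantic uncertainty probe.
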
import os
import pickle as pkl
from typing import List, Tuple, Iterator

import spaces
import gradio as gr
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
# Maximum prompt length in tokens; can be overridden via the MAX_INPUT_TOKEN_LENGTH environment variable.
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

DESCRIPTION = """\ | |
# Llama-2 7B Chat with Streamable Semantic Uncertainty Probe | |
This Space demonstrates the Llama-2-7b-chat model with an added semantic uncertainty probe. | |
The highlighted text shows the model's uncertainty in real-time, with more intense yellow indicating higher uncertainty. | |
""" | |
if torch.cuda.is_available():
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    # TODO load the full model?
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False

    # load the probe data
    # TODO load accuracy and SE probe and compare in different tabs
    with open("./model/20240625-131035_demo.pkl", "rb") as f:
        probe_data = pkl.load(f)
    # take the NQ open one
    probe_data = probe_data[-2]
    probe = probe_data['t_bmodel']
    layer_range = probe_data['sep_layer_range']
    acc_probe = probe_data['t_amodel']
    acc_layer_range = probe_data['ap_layer_range']
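
# The probes stored in the pickle are assumed (their training code is not part of this file)
# to be sklearn-style binary classifiers exposing predict_log_proba over features built by
# flattening the hidden states of the layers selected by `layer_range`. A minimal sanity-check
# sketch under that assumption; the helper name is illustrative and it is never called:
def _check_probe_interface(probe, layer_range, hidden_size: int = 4096) -> None:
    # Llama-2-7b uses a hidden size of 4096, so the probe input is a single row of
    # (layer_range[1] - layer_range[0]) * hidden_size features, matching the
    # `concat_layers` vector built inside generate().
    n_features = (layer_range[1] - layer_range[0]) * hidden_size
    dummy = np.zeros((1, n_features), dtype=np.float32)
    log_probs = probe.predict_log_proba(dummy)  # shape (1, 2): [log P(low SE), log P(high SE)]
    assert log_probs.shape == (1, 2)
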
# @spaces.GPU is needed to request a GPU on ZeroGPU Spaces; it is a no-op on other hardware.
@spaces.GPU
def generate(
    message: str,
    chat_history: List[Tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        streamer=streamer,
        output_hidden_states=True,
        return_dict_in_generate=True,
    )

    # Generate without threading
    with torch.no_grad():
        outputs = model.generate(**generation_kwargs)
        print(outputs.sequences.shape, input_ids.shape)
        generated_tokens = outputs.sequences[0, input_ids.shape[1]:]
        print("Generated tokens:", generated_tokens, generated_tokens.shape)
        generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
        print("Generated text:", generated_text)

    # hidden states: a tuple with one entry per generation step; each entry is a tuple of
    # per-layer tensors of shape (batch_size, sequence_length, hidden_size)
    hidden = outputs.hidden_states
    print(len(hidden))  # number of generation steps
    print(len(hidden[1]))  # number of layers (including the embedding layer output)
    print(hidden[1][0].shape)  # (batch_size=1, sequence_length=1, hidden_size)

    # stack token embeddings
    # TODO do this loop on the fly instead of waiting for the whole generation
    highlighted_text = ""
    # step 0 covers the prompt forward pass, so start from step 1 (the generated tokens)
    for i in range(1, len(hidden)):
        token_embeddings = torch.stack([layer[0, 0, :].cpu() for layer in hidden[i]])  # (num_layers, hidden_size)
        # probe the model: concatenate the hidden states of the selected layers
        concat_layers = token_embeddings.numpy()[layer_range[0]:layer_range[1]].reshape(-1)  # (num_layers_in_range * hidden_size,)
        # log-probability of the "high semantic entropy" class (always <= 0); TODO use predict_proba instead?
        probe_pred = probe.predict_log_proba(concat_layers.reshape(1, -1))[0][1]
        # decode one token at a time
        output_id = outputs.sequences[0, input_ids.shape[1] + i]
        output_word = tokenizer.decode(output_id)
        print(output_id, output_word, probe_pred)
        new_highlighted_text = highlight_text(output_word, probe_pred)
        highlighted_text += new_highlighted_text
        yield highlighted_text
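
# For reference, the per-step probing logic used in the loop above, factored into a
# standalone helper. This is a sketch that mirrors the inline code; it is not called by
# generate(), which keeps the inline version so it can yield highlighted text as it goes.
def probe_step_uncertainty(step_hidden_states, probe, layer_range) -> float:
    """Return the probe's log-probability of the "high semantic entropy" class for one step.

    step_hidden_states is one element of outputs.hidden_states for a generated token:
    a tuple with one (batch_size=1, sequence_length=1, hidden_size) tensor per layer.
    """
    token_embeddings = torch.stack(
        [layer[0, 0, :].cpu() for layer in step_hidden_states]
    )  # (num_layers, hidden_size)
    features = token_embeddings.numpy()[layer_range[0]:layer_range[1]].reshape(1, -1)
    return probe.predict_log_proba(features)[0][1]
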
def highlight_text(text: str, uncertainty_score: float) -> str:
    # Map the score to a background color: white at 0, shading to red for positive scores
    # and to green for negative scores. Clamp to [-1, 1] so that scores outside that range
    # (e.g. log-probabilities below -1) still produce a valid hex color.
    score = max(-1.0, min(1.0, uncertainty_score))
    if score > 0:
        html_color = "#%02X%02X%02X" % (
            255,
            int(255 * (1 - score)),
            int(255 * (1 - score)),
        )
    else:
        html_color = "#%02X%02X%02X" % (
            int(255 * (1 + score)),
            255,
            int(255 * (1 + score)),
        )
    return '<span style="background-color: {}; color: black">{}</span>'.format(
        html_color, text
    )
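
# Example of the mapping implemented above (illustrative): highlight_text("token", 1.0)
# yields a red background (#FF0000), scores near 0 yield white (#FFFFFF), and
# highlight_text("token", -1.0) yields green (#00FF00). Since predict_log_proba returns
# values <= 0, the streamed output in practice falls on the white-to-green side.
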
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["What is the capital of France?"],
        ["Explain the theory of relativity in simple terms."],
        ["Write a short poem about artificial intelligence."],
    ],
    title="Llama-2 7B Chat with Streamable Semantic Uncertainty Probe",
    description=DESCRIPTION,
)

if __name__ == "__main__":
    chat_interface.launch()