# from pathlib import Path
# from typing import List, Optional, Tuple
# import spaces
# import gradio as gr
# import numpy as np
# import torch
# from sudachipy import dictionary
# from sudachipy import tokenizer as sudachi_tokenizer
# from transformers import AutoModelForCausalLM, PreTrainedTokenizer, T5Tokenizer
# model_dir = Path(__file__).parents[0] / "model"
# device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# tokenizer = T5Tokenizer.from_pretrained(model_dir)
# tokenizer.do_lower_case = True
# trained_model = AutoModelForCausalLM.from_pretrained(model_dir)
# trained_model.to(device)
# # baseline model
# baseline_model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium")
# baseline_model.to(device)
# sudachi_tokenizer_obj = dictionary.Dictionary().create()
# mode = sudachi_tokenizer.Tokenizer.SplitMode.C
# def sudachi_tokenize(input_text: str) -> List[str]:
#     morphemes = sudachi_tokenizer_obj.tokenize(input_text, mode)
#     return [morpheme.surface() for morpheme in morphemes]
# def calc_offsets(tokens: List[str]) -> List[int]:
#     offsets = [0]
#     for token in tokens:
#         offsets.append(offsets[-1] + len(token))
#     return offsets
# def distribute_surprisals_to_characters(
#     tokens2surprisal: List[Tuple[str, float]]
# ) -> List[Tuple[str, float]]:
#     tokens2surprisal_by_character: List[Tuple[str, float]] = []
#     for token, surprisal in tokens2surprisal:
#         token_len = len(token)
#         for character in token:
#             tokens2surprisal_by_character.append((character, surprisal / token_len))
#     return tokens2surprisal_by_character
# def calculate_surprisals_by_character(
#     input_text: str, model: AutoModelForCausalLM, tokenizer: PreTrainedTokenizer
# ) -> Tuple[float, List[Tuple[str, float]]]:
#     input_tokens = [
#         token.replace("▁", "")
#         for token in tokenizer.tokenize(input_text)
#         if token != "▁"
#     ]
#     input_ids = tokenizer.encode(
#         "<s>" + input_text, add_special_tokens=False, return_tensors="pt"
#     ).to(device)
#     logits = model(input_ids)["logits"].squeeze(0)
#     surprisals = []
#     for i in range(logits.shape[0] - 1):
#         if input_ids[0][i + 1] == 9:
#             continue
#         logit = logits[i]
#         prob = torch.softmax(logit, dim=0)
#         neg_logprob = -torch.log(prob)
#         surprisals.append(neg_logprob[input_ids[0][i + 1]].item())
#     mean_surprisal = np.mean(surprisals)
#     tokens2surprisal: List[Tuple[str, float]] = []
#     for token, surprisal in zip(input_tokens, surprisals):
#         tokens2surprisal.append((token, surprisal))
#     char2surprisal = distribute_surprisals_to_characters(tokens2surprisal)
#     return mean_surprisal, char2surprisal
# def aggregate_surprisals_by_offset(
#     char2surprisal: List[Tuple[str, float]], offsets: List[int]
# ) -> List[Tuple[str, float]]:
#     tokens2surprisal = []
#     for i in range(len(offsets) - 1):
#         start = offsets[i]
#         end = offsets[i + 1]
#         surprisal = sum([surprisal for _, surprisal in char2surprisal[start:end]])
#         token = "".join([char for char, _ in char2surprisal[start:end]])
#         tokens2surprisal.append((token, surprisal))
#     return tokens2surprisal
# def highlight_token(token: str, score: float):
#     if score > 0:
#         html_color = "#%02X%02X%02X" % (
#             255,
#             int(255 * (1 - score)),
#             int(255 * (1 - score)),
#         )
#     else:
#         html_color = "#%02X%02X%02X" % (
#             int(255 * (1 + score)),
#             255,
#             int(255 * (1 + score)),
#         )
#     return '<span style="background-color: {}; color: black">{}</span>'.format(
#         html_color, token
#     )
# def create_highlighted_text(
#     label: str,
#     tokens2scores: List[Tuple[str, float]],
#     mean_surprisal: Optional[float] = None,
# ):
#     if mean_surprisal is None:
#         highlighted_text = "<h2><b>" + label + "</b></h2>"
#     else:
#         highlighted_text = (
#             "<h2><b>" + label + f"</b>(サプライザル平均値: {mean_surprisal:.3f})</h2>"
#         )
#     for token, score in tokens2scores:
#         highlighted_text += highlight_token(token, score)
#     return highlighted_text
# def normalize_surprisals(
#     tokens2surprisal: List[Tuple[str, float]], log_scale: bool = False
# ) -> List[Tuple[str, float]]:
#     if log_scale:
#         surprisals = [np.log(surprisal) for _, surprisal in tokens2surprisal]
#     else:
#         surprisals = [surprisal for _, surprisal in tokens2surprisal]
#     min_surprisal = np.min(surprisals)
#     max_surprisal = np.max(surprisals)
#     surprisals = [
#         (surprisal - min_surprisal) / (max_surprisal - min_surprisal)
#         for surprisal in surprisals
#     ]
#     assert min(surprisals) >= 0
#     assert max(surprisals) <= 1
#     return [
#         (token, surprisal)
#         for (token, _), surprisal in zip(tokens2surprisal, surprisals)
#     ]
# def calculate_surprisal_diff(
#     tokens2surprisal: List[Tuple[str, float]],
#     baseline_tokens2surprisal: List[Tuple[str, float]],
#     scale: float = 100.0,
# ):
#     diff_tokens2surprisal = [
#         (token, (surprisal - baseline_surprisal) * 100)
#         for (token, surprisal), (_, baseline_surprisal) in zip(
#             tokens2surprisal, baseline_tokens2surprisal
#         )
#     ]
#     return diff_tokens2surprisal
# @spaces.GPU
# def main(input_text: str) -> Tuple[str, str, str]:
#     mean_surprisal, char2surprisal = calculate_surprisals_by_character(
#         input_text, trained_model, tokenizer
#     )
#     offsets = calc_offsets(sudachi_tokenize(input_text))
#     tokens2surprisal = aggregate_surprisals_by_offset(char2surprisal, offsets)
#     tokens2surprisal = normalize_surprisals(tokens2surprisal)
#     highlighted_text = create_highlighted_text(
#         "学習後モデル", tokens2surprisal, mean_surprisal
#     )
#     (
#         baseline_mean_surprisal,
#         baseline_char2surprisal,
#     ) = calculate_surprisals_by_character(input_text, baseline_model, tokenizer)
#     baseline_tokens2surprisal = aggregate_surprisals_by_offset(
#         baseline_char2surprisal, offsets
#     )
#     baseline_tokens2surprisal = normalize_surprisals(baseline_tokens2surprisal)
#     baseline_highlighted_text = create_highlighted_text(
#         "学習前モデル", baseline_tokens2surprisal, baseline_mean_surprisal
#     )
#     diff_tokens2surprisal = calculate_surprisal_diff(
#         tokens2surprisal, baseline_tokens2surprisal, 100.0
#     )
#     diff_highlighted_text = create_highlighted_text(
#         "学習前後の差分", diff_tokens2surprisal, None
#     )
#     return (
#         baseline_highlighted_text,
#         highlighted_text,
#         diff_highlighted_text,
#     )
# if __name__ == "__main__":
#     demo = gr.Interface(
#         fn=main,
#         title="文章の読みやすさを自動評価するAI",
#         description="文章を入力すると、読みづらい表現は赤く、読みやすい表現は青くハイライトされて出力されます。",
#         # show_label=True,
#         inputs=gr.Textbox(
#             lines=5,
#             label="文章",
#             placeholder="ここに文章を入力してください。",
#         ),
#         outputs=[
#             gr.HTML(label="学習前モデル", show_label=True),
#             gr.HTML(label="学習後モデル", show_label=True),
#             gr.HTML(label="学習前後の差分", show_label=True),
#         ],
#         examples=[
#             "太郎が二郎を殴った。",
#             "太郎が二郎に殴った。",
#             "サイエンスインパクトラボは、国立研究開発法人科学技術振興機構(JST)の「科学と社会」推進部が行う共創プログラムです。「先端の研究開発を行う研究者」と「社会課題解決に取り組むプレイヤー」が約3ヶ月に渡って共創活動を行います。",
#             "近年、ニューラル言語モデルが自然言語の統語知識をどれほど有しているかを、容認性判断課題を通して検証する研究が行われてきている。しかし、このような言語モデルの統語的評価を行うためのデータセットは、主に英語を中心とした欧米の諸言語を対象に構築されてきた。本研究では、既存のデータセットの問題点を克服しつつ、このようなデータセットが構築されてこなかった日本語を対象とした初めてのデータセットである JCoLA (JapaneseCorpus of Linguistic Acceptability) を構築した上で、それを用いた言語モデルの統語的評価を行った。",
#         ],
#     )
#     demo.launch()
import os
import pickle as pkl
from pathlib import Path
from threading import Thread
from typing import List, Optional, Tuple, Iterator
import spaces
import gradio as gr
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
DESCRIPTION = """\
# Llama-2 7B Chat with Streamable Semantic Uncertainty Probe
This Space demonstrates the Llama-2-7b-chat model with an added semantic uncertainty probe.
The highlighted text shows the model's uncertainty in real time, with more intense red indicating higher uncertainty.
"""
if torch.cuda.is_available():
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    # TODO load the full model?
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
# load the probe data
# TODO load accuracy and SE probe and compare in different tabs
with open("./model/20240625-131035_demo.pkl", "rb") as f:
    probe_data = pkl.load(f)
# take the NQ open one
probe_data = probe_data[-2]
probe = probe_data['t_bmodel']
layer_range = probe_data['sep_layer_range']
acc_probe = probe_data['t_amodel']
acc_layer_range = probe_data['ap_layer_range']
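# Assumption: the unpickled `probe` / `acc_probe` objects behave like sklearn binary classifiers
# (they expose predict_proba / predict_log_proba), and `layer_range` / `acc_layer_range` select
# which hidden layers are concatenated into the probe's input for each generated token.
# Only the semantic-uncertainty probe (`probe`, `layer_range`) is used below; the accuracy probe
# is loaded for the comparison mentioned in the TODO above.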
@spaces.GPU
def generate(
    message: str,
    chat_history: List[Tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
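    """Chat with Llama-2 and stream back HTML in which each generated token is wrapped in a
    colored span reflecting the semantic-uncertainty probe's score for that generation step."""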
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        streamer=streamer,
        output_hidden_states=True,
        return_dict_in_generate=True,
    )
    # Generate without threading
    with torch.no_grad():
        outputs = model.generate(**generation_kwargs)
    print(outputs.sequences.shape, input_ids.shape)
    generated_tokens = outputs.sequences[0, input_ids.shape[1]:]
    print("Generated tokens:", generated_tokens, generated_tokens.shape)
    generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    print("Generated text:", generated_text)
    # hidden states
    hidden = outputs.hidden_states  # list of tensors, one for each token, then (batch size, sequence length, hidden size)
    print(len(hidden))
    print(len(hidden[1]))  # layers
    print(hidden[1][0].shape)  # (sequence length, hidden size)
    # stack token embeddings
    # TODO do this loop on the fly instead of waiting for the whole generation
    highlighted_text = ""
    for i in range(1, len(hidden)):
        token_embeddings = torch.stack([generated_token[0, 0, :].cpu() for generated_token in hidden[i]])  # (num_layers, hidden_size)
        # print(token_embeddings.shape)
        # probe the model
        # print(token_embeddings.numpy()[layer_range].shape)
        concat_layers = token_embeddings.numpy()[layer_range[0]:layer_range[1]].reshape(-1)  # (num_layers * hidden_size)
        # print(concat_layers.shape)
        # use the probability (rather than the log-probability) so the score stays in [0, 1] for the color mapping
        probe_pred = probe.predict_proba(concat_layers.reshape(1, -1))[0][1]  # probability of high SE
        # print(probe_pred)
        # decode one token at a time
        output_id = outputs.sequences[0, input_ids.shape[1] + i]
        output_word = tokenizer.decode(output_id)  # decode before logging so output_word is defined
        print(output_id, output_word, probe_pred)
        new_highlighted_text = highlight_text(output_word, probe_pred)
        highlighted_text += new_highlighted_text
        yield highlighted_text
def highlight_text(text: str, uncertainty_score: float) -> str:
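    """Wrap `text` in an HTML span whose background color encodes `uncertainty_score`:
    scores above 0 shade toward red, scores at or below 0 shade toward green."""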
    if uncertainty_score > 0:
        html_color = "#%02X%02X%02X" % (
            255,
            int(255 * (1 - uncertainty_score)),
            int(255 * (1 - uncertainty_score)),
        )
    else:
        html_color = "#%02X%02X%02X" % (
            int(255 * (1 + uncertainty_score)),
            255,
            int(255 * (1 + uncertainty_score)),
        )
    return '<span style="background-color: {}; color: black">{}</span>'.format(
        html_color, text
    )
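# Example: highlight_text("Paris", 0.8) returns
# '<span style="background-color: #FF3333; color: black">Paris</span>',
# since int(255 * (1 - 0.8)) == 51 == 0x33.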
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["What is the capital of France?"],
        ["Explain the theory of relativity in simple terms."],
        ["Write a short poem about artificial intelligence."],
    ],
    title="Llama-2 7B Chat with Streamable Semantic Uncertainty Probe",
    description=DESCRIPTION,
)
if __name__ == "__main__":
    chat_interface.launch()