Update app.py
app.py CHANGED
@@ -45,7 +45,7 @@ suggested_interpretation_prompts = ["Before responding, let me repeat the messag
 def initialize_gpu():
     pass
 
-def get_hidden_states(
+def get_hidden_states(raw_original_prompt, progress=gr.Progress()):
     original_prompt = original_prompt_template.format(prompt=raw_original_prompt)
     model_inputs = tokenizer(original_prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
     tokens = tokenizer.batch_decode(model_inputs.input_ids[0])
@@ -191,6 +191,6 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
     ], [*interpretation_bubbles])
 
     original_prompt_btn.click(get_hidden_states,
-                              [
+                              [original_prompt_raw],
                               [global_state, *tokens_container])
 demo.launch()
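
For context, the change fills in the signature of get_hidden_states and passes original_prompt_raw as the single input of the button's click event. Below is a minimal, self-contained sketch of the same Blocks wiring pattern; only the names that also appear in the diff (get_hidden_states, original_prompt_raw, original_prompt_btn, global_state, tokens_container) come from the app, and everything else (MAX_TOKENS, the placeholder token textboxes, the dummy function body) is an illustrative assumption, not code from this repo.

import gradio as gr

MAX_TOKENS = 8  # illustrative size; the real app manages its token slots differently

with gr.Blocks() as demo:
    global_state = gr.State()                        # shared cache across events
    original_prompt_raw = gr.Textbox(label="Prompt")
    original_prompt_btn = gr.Button("Run")
    tokens_container = [gr.Textbox(label=f"token {i}") for i in range(MAX_TOKENS)]

    def get_hidden_states(raw_original_prompt, progress=gr.Progress()):
        # Gradio supplies `progress` itself; only the textbox value arrives
        # through the inputs list wired below.
        tokens = raw_original_prompt.split()[:MAX_TOKENS]
        tokens += [""] * (MAX_TOKENS - len(tokens))
        # One return value per output component: the state first, then one
        # value for each token slot.
        return (tokens, *tokens)

    # Wiring as in the diff: fn, inputs, outputs.
    original_prompt_btn.click(get_hidden_states,
                              [original_prompt_raw],
                              [global_state, *tokens_container])

demo.launch()

gr.Progress() is injected by Gradio at call time, so it does not need to appear in the inputs list; the list only has to cover the remaining positional parameters, and the outputs list must match the values the function returns, in order.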