Update app.py
app.py (CHANGED)
@@ -18,11 +18,15 @@ MAX_PROMPT_TOKENS = 60
 MAX_NUM_LAYERS = 50
 welcome_message = '**You are now running {model_name}!!** 🥳🥳🥳'
 
+@dataclass
+class LocalState:
+    hidden_states: Optional[torch.Tensor] = None
+
 @dataclass
 class GlobalState:
     tokenizer : Optional[PreTrainedTokenizer] = None
     model : Optional[PreTrainedModel] = None
-
+    local_state : LocalState = LocalState()
     interpretation_prompt_template : str = '{prompt}'
     original_prompt_template : str = 'User: [X]\n\nAnswer: {prompt}'
     layers_format : str = 'model.layers.{k}'
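Note on the new field: `local_state : LocalState = LocalState()` evaluates `LocalState()` once, at class-definition time, so every `GlobalState` instance would share that single object, and Python 3.11+ dataclasses reject a non-frozen dataclass instance as a default outright. A minimal sketch of the `default_factory` form that avoids both issues (the usual idiom, not what this commit does):

from dataclasses import dataclass, field
from typing import Optional

import torch


@dataclass
class LocalState:
    hidden_states: Optional[torch.Tensor] = None


@dataclass
class GlobalState:
    # default_factory builds a fresh LocalState for each GlobalState,
    # instead of one shared default created at class-definition time
    local_state: LocalState = field(default_factory=LocalState)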
@@ -56,7 +60,7 @@ def reset_model(model_name, *extra_components, with_extra_components=True):
     AutoModelClass = CAutoModelForCausalLM if use_ctransformers else AutoModelForCausalLM
 
     # get model
-    global_state.model, global_state.tokenizer = None, None
+    global_state.model, global_state.tokenizer, global_state.local_state = None, None, LocalState()
     gc.collect()
     global_state.model = AutoModelClass.from_pretrained(model_path, **model_args)
     if not dont_cuda:
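The reset line matters for memory: dropping the only references to the old model before `from_pretrained` lets Python reclaim it while the new one loads, instead of holding both at once. A minimal sketch of the same pattern, with an optional CUDA cache flush added for illustration (the flush is an assumption, not part of the commit):

import gc

import torch


def release_old_model(global_state):
    # drop references so the previous model and tokenizer become collectable
    global_state.model, global_state.tokenizer, global_state.local_state = None, None, LocalState()
    gc.collect()  # collect now rather than at some later, arbitrary point
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # hand freed, cached CUDA blocks back to the driver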
@@ -71,7 +75,7 @@ def reset_model(model_name, *extra_components, with_extra_components=True):
 
 
 @spaces.GPU
-def get_hidden_states(global_state, raw_original_prompt):
+def get_hidden_states(local_state, raw_original_prompt):
     model, tokenizer = global_state.model, global_state.tokenizer
     original_prompt = global_state.original_prompt_template.format(prompt=raw_original_prompt)
     model_inputs = tokenizer(original_prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
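Passing `local_state` into `get_hidden_states` instead of reading a module-level global suggests it is now carried as per-session state, e.g. a Gradio `gr.State` value, so concurrent users of the Space stop overwriting each other's cached hidden states. A hypothetical sketch of that wiring with a toy callback (component names and the callback body are assumptions):

import gradio as gr
import torch


def extract(local_state, prompt):
    # stand-in for get_hidden_states: mutate the session's state in place
    local_state.hidden_states = torch.zeros(1, 4, 8)
    return f'cached hidden states for {prompt!r}'


with gr.Blocks() as demo:
    local_state = gr.State(LocalState())  # each browser session gets its own copy
    prompt = gr.Textbox(label='Original prompt')
    status = gr.Markdown()
    gr.Button('Run').click(extract, [local_state, prompt], [status])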
@@ -82,7 +86,7 @@ def get_hidden_states(global_state, raw_original_prompt):
                   + [gr.Button('', visible=False) for _ in range(MAX_PROMPT_TOKENS - len(tokens))])
     progress_dummy_output = ''
     invisible_bubbles = [gr.Textbox('', visible=False) for i in range(MAX_NUM_LAYERS)]
-    global_state.hidden_states = hidden_states.cpu().detach()
+    local_state.hidden_states = hidden_states.cpu().detach()
     return [progress_dummy_output, *token_btns, *invisible_bubbles]
 
 
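Caching `hidden_states.cpu().detach()` keeps the stored states off the GPU and outside the autograd graph, so a long-lived session does not pin device memory. Detaching before the copy, as in the sketch below, is the marginally tidier order because the CPU copy then never enters the graph at all; both orders end up equivalent:

import torch


def stash(hidden_states: torch.Tensor) -> torch.Tensor:
    # detach first so the copy carries no autograd history,
    # then move to CPU so cached states do not hold GPU memory
    return hidden_states.detach().cpu()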
@@ -93,7 +97,7 @@ def run_interpretation(raw_interpretation_prompt, max_new_tokens, do_sample,
     model = global_state.model
     tokenizer = global_state.tokenizer
     print(f'run {model}')
-    interpreted_vectors = torch.tensor(global_state.hidden_states[:, i]).to(model.device).to(model.dtype)
+    interpreted_vectors = torch.tensor(global_state.local_state.hidden_states[:, i]).to(model.device).to(model.dtype)
     length_penalty = -length_penalty # unintuitively, length_penalty > 0 will make sequences longer, so we negate it
 
     # generation parameters
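Two details in `run_interpretation` worth flagging: calling `torch.tensor()` on something that is already a tensor makes a copy and triggers a PyTorch warning recommending `clone().detach()`, and the `length_penalty` negation matches Hugging Face generation semantics, where a positive exponent favors longer beam-search sequences. A sketch of a slightly more idiomatic equivalent (same names as the diff; `i` is the selected token index):

# hidden_states was stored already detached, so clone() alone is enough
interpreted_vectors = (
    global_state.local_state.hidden_states[:, i]
    .clone()
    .to(model.device)
    .to(model.dtype)
)
length_penalty = -length_penalty  # HF: length_penalty > 0 lengthens beam-search outputs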