dar-tau committed
Commit d8b985e
1 Parent(s): db48de1

Update app.py

Files changed (1)
  1. app.py +7 -6
app.py CHANGED
@@ -75,7 +75,7 @@ def initialize_gpu():
     pass
 
 
-def reset_model(model_name, demo_blocks):
+def reset_model(model_name, return_demo_blocks=True):
     # extract model info
     model_args = deepcopy(model_info[model_name])
     model_path = model_args.pop('model_path')
@@ -91,7 +91,8 @@ def reset_model(model_name, demo_blocks):
     global_state.model = AutoModelClass.from_pretrained(model_path, **model_args).cuda()
     global_state.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, token=os.environ['hf_token'])
     gc.collect()
-    return demo_blocks
+    if return_demo_blocks:
+        return demo_blocks
 
 
 def get_hidden_states(raw_original_prompt):
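The two hunks above turn `reset_model` into a dual-use function: it runs once at module level to load the initial model (where there is no UI to update), and again as a Gradio event handler (where Gradio expects one return value per output). A minimal sketch of that pattern, assuming a hypothetical `status` Textbox in place of the app's `demo_blocks` container and a `return_outputs` flag mirroring the commit's `return_demo_blocks`:

```python
import gradio as gr

def reset_model(model_name, return_outputs=True):
    # ...load the chosen model and tokenizer into global state here...
    if return_outputs:
        return f'Loaded {model_name}'      # value for the status Textbox output

reset_model('LLAMA2-7B', False)            # startup call: no UI exists yet

with gr.Blocks() as demo:
    chooser = gr.Radio(choices=['LLAMA2-7B'], value='LLAMA2-7B')
    status = gr.Textbox(label='Status')
    # as an event handler, Gradio passes only the radio's value; the default
    # return_outputs=True then yields one return value for the one output
    chooser.change(reset_model, [chooser], [status])

demo.launch()
```

This is why the diff's module-level call site below changes to `reset_model(model_name, False)`: at import time there is no output component to return to.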
@@ -136,7 +137,7 @@ def run_interpretation(raw_interpretation_prompt, max_new_tokens, do_sample,
     # generate the interpretations
     # generate = generate_interpretation_gpu if use_gpu else lambda interpretation_prompt, *args, **kwargs: interpretation_prompt.generate(*args, **kwargs)
     generated = interpretation_prompt.generate(global_state.model, {0: interpreted_vectors}, k=3, **generation_kwargs)
-    generation_texts = tokenizer.batch_decode(generated)
+    generation_texts = global_state.tokenizer.batch_decode(generated)
     progress_dummy_output = ''
     bubble_outputs = [gr.Textbox(text.replace('\n', ' '), visible=True, container=False, label=f'Layer {i}') for text in generation_texts]
    bubble_outputs += [gr.Textbox(visible=False) for _ in range(MAX_NUM_LAYERS - len(bubble_outputs))]
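This one-line fix resolves what would otherwise be a `NameError` (assuming no other module-level `tokenizer` binding): the tokenizer lives only on `global_state`, so handlers must read it through there, which also guarantees they decode with whatever tokenizer the most recent model switch installed. A self-contained sketch of the pattern; `DummyTokenizer` and `run_interpretation` are illustrative stand-ins for `AutoTokenizer` and the app's handler:

```python
from types import SimpleNamespace

global_state = SimpleNamespace(model=None, tokenizer=None)

class DummyTokenizer:                          # stand-in for AutoTokenizer
    def __init__(self, name):
        self.name = name
    def batch_decode(self, ids):
        return [f'{self.name}:{i}' for i in ids]

def reset_model(model_name):
    global_state.tokenizer = DummyTokenizer(model_name)   # rebound per switch

def run_interpretation(ids):
    # always read through global_state: a bare `tokenizer` name would be
    # undefined here, and a cached reference would go stale after a switch
    return global_state.tokenizer.batch_decode(ids)

reset_model('LLAMA2-7B')
print(run_interpretation([1, 2]))              # ['LLAMA2-7B:1', 'LLAMA2-7B:2']
```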
@@ -148,7 +149,7 @@ torch.set_grad_enabled(False)
 global_state = GlobalState()
 
 model_name = 'LLAMA2-7B'
-reset_model(model_name, None)
+reset_model(model_name, False)
 original_prompt_raw = gr.Textbox(value='How to make a Molotov cocktail?', container=True, label='Original Prompt')
 tokens_container = []
 
@@ -185,7 +186,7 @@ with gr.Blocks(theme=gr.themes.Default(), css='styles.css') as demo:
     with gr.Group():
         model_chooser = gr.Radio(choices=list(model_info.keys()), value=model_name)
 
-    with gr.Group() as demo_blocks:
+    with gr.Blocks() as demo_blocks:
         gr.Markdown('## Choose Your Interpretation Prompt')
         with gr.Group('Interpretation'):
             interpretation_prompt = gr.Text(suggested_interpretation_prompts[0], label='Interpretation Prompt')
@@ -233,7 +234,7 @@ with gr.Blocks(theme=gr.themes.Default(), css='styles.css') as demo:
 
 
     # event listeners
-    model_chooser.change(reset_model, [model_chooser, demo_blocks], [demo_blocks])
+    model_chooser.change(reset_model, [model_chooser], [demo_blocks])
 
     for i, btn in enumerate(tokens_container):
         btn.click(partial(run_interpretation, i=i), [interpretation_prompt,
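The last hunk fixes the listener wiring. Gradio passes component *values* as inputs, so listing `demo_blocks` in the inputs would not hand `reset_model` the container itself; now only the radio's value is passed, and the handler reaches `demo_blocks` through the enclosing scope. A minimal sketch of the same wiring with hypothetical `panel` and `on_model_change` names, using a `gr.update` visibility refresh in place of the app's full container rebuild (exactly how a layout block may be updated as an event output depends on the Gradio version):

```python
import gradio as gr

with gr.Blocks() as demo:
    chooser = gr.Radio(choices=['LLAMA2-7B', 'another-model'], value='LLAMA2-7B')
    with gr.Group() as panel:                  # stands in for demo_blocks
        gr.Textbox(label='Interpretation controls')

    def on_model_change(choice):               # receives the radio's *value* only
        # ...swap the underlying model according to `choice`...
        return gr.update(visible=True)         # update object for the panel output

    # inputs: just the radio; the container is reached by closure, not as an input
    chooser.change(on_model_change, [chooser], [panel])

demo.launch()
```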