dar-tau committed on
Commit
5b9a8b1
·
verified ·
1 Parent(s): d8c5a8d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -94,8 +94,8 @@ def run_interpretation(global_state, raw_interpretation_prompt, max_new_tokens,
94
  interpretation_prompt = InterpretationPrompt(tokenizer, interpretation_prompt)
95
 
96
  # generate the interpretations
97
- generate = generate_interpretation_gpu if use_gpu else lambda lambda interpretation_prompt, **kwargs: interpretation_prompt.generate(**kwargs)
98
- generated = generate(model, {0: interpreted_vectors}, k=3, **generation_kwargs)
99
  generation_texts = tokenizer.batch_decode(generated)
100
  progress_dummy_output = ''
101
  return ([progress_dummy_output] +
 
94
  interpretation_prompt = InterpretationPrompt(tokenizer, interpretation_prompt)
95
 
96
  # generate the interpretations
97
+ generate = generate_interpretation_gpu if use_gpu else lambda interpretation_prompt, **kwargs: interpretation_prompt.generate(**kwargs)
98
+ generated = generate(interpretation_prompt, model, {0: interpreted_vectors}, k=3, **generation_kwargs)
99
  generation_texts = tokenizer.batch_decode(generated)
100
  progress_dummy_output = ''
101
  return ([progress_dummy_output] +