Update app.py
app.py CHANGED
@@ -94,8 +94,8 @@ def run_interpretation(global_state, raw_interpretation_prompt, max_new_tokens,
     interpretation_prompt = InterpretationPrompt(tokenizer, interpretation_prompt)
 
     # generate the interpretations
-    generate = generate_interpretation_gpu if use_gpu else lambda
-    generated = generate(model, {0: interpreted_vectors}, k=3, **generation_kwargs)
+    generate = generate_interpretation_gpu if use_gpu else lambda interpretation_prompt, **kwargs: interpretation_prompt.generate(**kwargs)
+    generated = generate(interpretation_prompt, model, {0: interpreted_vectors}, k=3, **generation_kwargs)
     generation_texts = tokenizer.batch_decode(generated)
     progress_dummy_output = ''
     return ([progress_dummy_output] +
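For context, a minimal sketch of the GPU/CPU dispatch idiom the new lines use. The classes and functions below are hypothetical stand-ins: InterpretationPrompt, generate_interpretation_gpu, and their signatures are inferred only from the lines visible in this diff, not from the rest of app.py.

# Sketch of dispatching between a GPU path and a plain in-process path.
# Both callables share the same leading argument (the InterpretationPrompt),
# which is what the fixed lambda above makes possible.

class InterpretationPrompt:
    """Stand-in for the real class; the actual .generate() would run the model."""
    def __init__(self, tokenizer, prompt):
        self.tokenizer = tokenizer
        self.prompt = prompt

    def generate(self, model=None, batch=None, k=3, **generation_kwargs):
        # placeholder output: one fake interpretation per requested sample
        return [f"{self.prompt} / interpretation {i}" for i in range(k)]


def generate_interpretation_gpu(interpretation_prompt, model, batch, k=3, **generation_kwargs):
    """Stand-in for the GPU-offloaded path."""
    return interpretation_prompt.generate(model=model, batch=batch, k=k, **generation_kwargs)


use_gpu = False
generate = (generate_interpretation_gpu if use_gpu
            else lambda interpretation_prompt, *args, **kwargs:
                 interpretation_prompt.generate(*args, **kwargs))

prompt = InterpretationPrompt(tokenizer=None, prompt="What does this vector mean?")
print(generate(prompt, model=None, batch={0: []}, k=3))

With this shape, the caller can always write generate(interpretation_prompt, ...) and let the use_gpu flag decide which backend actually runs.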