Testing cached tensor
app.py CHANGED

@@ -39,7 +39,6 @@ You can use this demo to :
 5. Click on the "Submit" button to generate text with the selected feature clamped to the selected strength.
 """
 
-cached_tensor = None
 topk_indices = None
 
 sunglasses_file_path = "assets/sunglasses.jpg"
@@ -263,6 +262,7 @@ with gr.Blocks() as demo:
 
 
 if __name__ == "__main__":
+    cached_tensor = None
     tokenizer = AutoTokenizer.from_pretrained("llava-hf/llama3-llava-next-8b-hf")
     sae = load_single_sae("lmms-lab/llama3-llava-next-8b-hf-sae-131k", "model.layers.24")
     model, processor = maybe_load_llava_model(
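For context, a minimal sketch of the module-level cache pattern that `cached_tensor` and `topk_indices` appear to implement: a global set to `None` up front and filled in lazily by a handler. The handler name, tensor shapes, and torch calls below are illustrative assumptions, not code from app.py.

import torch

# Module-level cache slots, initialized empty (as in the diff above).
cached_tensor = None
topk_indices = None

def get_feature_activations(k: int = 5):
    """Hypothetical handler: compute activations once, then reuse the cached copy."""
    global cached_tensor, topk_indices
    if cached_tensor is None:
        # Stand-in for the real SAE forward pass; shape chosen arbitrarily here.
        cached_tensor = torch.randn(1, 131_072)
        topk_indices = cached_tensor.topk(k).indices
    return cached_tensor, topk_indices

Because `cached_tensor = None` now lives inside the `if __name__ == "__main__":` block, the name is still bound at module scope when app.py is run directly, so `global cached_tensor` assignments in handlers keep working as before.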