ReFT
zhengxuanzenwu and neuronpedia committed
Commit a1bd4d6 · verified · 1 Parent(s): ad38e7f

README updates (#1)


- README updates (de082e2d13204a854c00c47a455f77a42a330648)


Co-authored-by: Neuronpedia <[email protected]>

Files changed (1)
  1. README.md +8 -0
README.md CHANGED
@@ -24,8 +24,15 @@ It is a single dictionary of subspaces for 16K concepts and serves as a drop-in
 
 ```python
 from huggingface_hub import hf_hub_download
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 import pyvene as pv
 
+# Load model and tokenizer
+model_name = "google/gemma-2-2b-it"
+model = AutoModelForCausalLM.from_pretrained(model_name).cuda()
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
 # Create an intervention.
 class Encoder(pv.CollectIntervention):
     """An intervention that reads concept latent from streams"""
@@ -38,6 +45,7 @@ class Encoder(pv.CollectIntervention):
 
 # Loading weights
 path_to_params = hf_hub_download(repo_id="pyvene/gemma-reft-2b-it-res", filename="l20/weight.pt")
+params = torch.load(path_to_params)
 encoder = Encoder(embed_dim=params.shape[0], latent_dim=params.shape[1])
 encoder.proj.weight.data = params.float()
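
For readers who want to exercise the added lines end to end, the sketch below is not part of the committed README: it loads the same model and `l20/weight.pt` checkpoint, but reads the layer-20 hidden states with a plain `forward` hook instead of mounting the pyvene `Encoder`. The weight orientation (`[embed_dim, latent_dim]`, as implied by the README's `params.shape` usage) and the ReLU readout are assumptions inferred from the truncated `Encoder` in the diff.

```python
# Minimal usage sketch (not from the README); assumptions are marked below.
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "google/gemma-2-2b-it"
model = AutoModelForCausalLM.from_pretrained(model_name).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Same checkpoint the diff downloads: the layer-20 residual-stream dictionary.
path_to_params = hf_hub_download(
    repo_id="pyvene/gemma-reft-2b-it-res", filename="l20/weight.pt")
params = torch.load(path_to_params).float().cuda()  # assumed shape: [embed_dim, latent_dim]

# Capture the layer-20 output with a standard forward hook instead of pyvene.
captured = {}
def grab_hidden(module, inputs, output):
    # Gemma-2 decoder layers return a tuple; the hidden states come first.
    captured["h"] = output[0]

handle = model.model.layers[20].register_forward_hook(grab_hidden)
inputs = tokenizer("Would a wormhole allow time travel?", return_tensors="pt").to("cuda")
with torch.no_grad():
    model(**inputs)
handle.remove()

# Concept latents per token: ReLU of a linear projection, mirroring what the
# truncated Encoder in the diff appears to compute (an assumption).
latents = torch.relu(captured["h"].float() @ params)
print(latents.shape)  # (batch, seq_len, latent_dim) — ~16K concepts per the README
```

In the committed README the same projection is wrapped in a `pv.CollectIntervention`, so pyvene gathers the latents during a normal forward pass; the hook above is only a dependency-light stand-in for trying out the downloaded weights.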