AbstractPhil committed · verified · Commit 6dfa555 · Parent(s): fb60b42

Update README.md

Files changed (1): README.md (+12 -2)
README.md CHANGED

@@ -123,6 +123,9 @@ def main():
 
 ```
 
+### You'll need to snip out the `_orig_mod.` layer prefixes that got baked into the state dict when I saved.
+Still not quite sure how to fix that other than editing the state dict before saving, but I think it's causing some additional effects that I'm unaware of.
+I don't want to save as .pt because pickle checkpoints are considered unsafe, and I don't want this one to be flagged as unsafe to use.
 
 You can run inference with the test version using stable-diffusion-v1-5 as an example.
 The CLIP_L responses fall apart when too many nodes hit those guidance bells, but it's definitely a powerful first test using divergent systems.
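
The `_orig_mod.` prefix mentioned above most commonly comes from calling `state_dict()` on a `torch.compile`-wrapped module. A minimal sketch of one way to strip it at save time while staying in the safetensors format; the function name and output path are illustrative, not part of this repo:

```python
# Sketch: strip the "_orig_mod." prefix (added by torch.compile's wrapper)
# from the state-dict keys before saving, so downstream loaders don't need
# the replace() cleanup shown in the README.
import torch
from safetensors.torch import save_file

def save_adapter_clean(adapter: torch.nn.Module, path: str) -> None:
    state = adapter.state_dict()
    clean = {k.removeprefix("_orig_mod."): v.contiguous() for k, v in state.items()}
    save_file(clean, path)

# Hypothetical call site inside the training loop:
# save_adapter_clean(adapter, "roba_adapter_step_19500.safetensors")
```
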
@@ -171,6 +174,10 @@ t5_mod = T5EncoderModel.from_pretrained(
     torch_dtype=DTYPE
 ).to(DEVICE).eval()
 
+# 1c) Velocity adapter local path
+local_adapter_directory = "roba_adapter_step_19500.safetensors"  # loaded into state below
+
+
 # 1c) Adapter
 import torch
 import torch.nn as nn
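
If the `.safetensors` file is not already on disk, the same variable can be filled by downloading the weights first. A hedged sketch; the `repo_id` is a placeholder, since this commit does not say where the adapter file is hosted:

```python
# Sketch: fetch the adapter weights and reuse the README's variable name.
# The repo_id below is a placeholder, not a confirmed repository.
from huggingface_hub import hf_hub_download

local_adapter_directory = hf_hub_download(
    repo_id="<user>/<adapter-repo>",                 # placeholder
    filename="roba_adapter_step_19500.safetensors",  # file name used in the README
)
```
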
@@ -364,19 +371,22 @@ scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
     "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
 )
 
-# 1c) T5 + Adapter
+# 1c) T5
 t5_tok = T5TokenizerFast.from_pretrained("t5-small")
 t5_mod = T5EncoderModel.from_pretrained(
     "AbstractPhil/T5-Small-Human-Attentive-Try2-Pass3",
     torch_dtype=DTYPE
 ).to(DEVICE).eval()
 
+# 1d) velocity prediction adapter
 adapter = RobustVelocityAdapter(out_tokens=64).to(DEVICE).eval()
-state = load_safetensors("roba_adapter_step_19500.safetensors", device="cpu")
+state = load_safetensors(local_adapter_directory, device="cpu")
 clean = {k.replace("_orig_mod.", ""): v for k, v in state.items()}
 adapter.load_state_dict(clean, strict=False)
 adapter.to(DEVICE).eval()
 
+
+
 # 2) GENERATION FUNCTION
 @torch.no_grad()
 def generate_image_with_adapter(
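
Because the adapter is loaded with `strict=False`, any keys that still fail to match after the `_orig_mod.` cleanup are skipped silently, which is one place the "additional effects" mentioned above could be coming from. A small sketch, reusing `adapter` and `clean` from the snippet above, that surfaces those keys:

```python
# Sketch: inspect the result of load_state_dict to see which keys were
# skipped. `adapter` and `clean` come from the README snippet above.
result = adapter.load_state_dict(clean, strict=False)
if result.missing_keys:
    print("Missing keys (left at their init values):", result.missing_keys)
if result.unexpected_keys:
    print("Unexpected keys (ignored):", result.unexpected_keys)
```
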
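The diff only shows the first line of `generate_image_with_adapter`, so its signature is not visible here; the call below is purely hypothetical, with every parameter name assumed rather than taken from the README:

```python
# Hypothetical usage only: the real generate_image_with_adapter signature is
# defined later in the README and is not part of this diff, so all parameter
# names here are assumptions.
image = generate_image_with_adapter(
    prompt="a lighthouse on a rocky coast at dusk",  # assumed parameter
    num_inference_steps=30,                          # assumed parameter
    guidance_scale=7.5,                              # assumed parameter
)
image.save("adapter_test.png")                       # assumes a PIL.Image return value
```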