anas-awadalla committed on
Commit
64c725f
·
1 Parent(s): 05bbe54

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -56,12 +56,12 @@ model, image_processor, tokenizer = create_model_and_transforms(
56
  clip_vision_encoder_path="ViT-L-14",
57
  lang_encoder_path="anas-awadalla/mpt-7b",
58
  tokenizer_path="anas-awadalla/mpt-7b",
59
- cross_attn_every_n_layers=1,
 
60
  )
61
 
62
  checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-9B-vitl-mpt7b", "checkpoint.pt")
63
  model.load_state_dict(torch.load(checkpoint_path), strict=False)
64
- model = model.to(0, dtype=torch.bfloat16)
65
  model.eval()
66
 
67
  def generate(
 
56
  clip_vision_encoder_path="ViT-L-14",
57
  lang_encoder_path="anas-awadalla/mpt-7b",
58
  tokenizer_path="anas-awadalla/mpt-7b",
59
+ cross_attn_every_n_layers=4,
60
+ device=0
61
  )
62
 
63
  checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-9B-vitl-mpt7b", "checkpoint.pt")
64
  model.load_state_dict(torch.load(checkpoint_path), strict=False)
 
65
  model.eval()
66
 
67
  def generate(