iamrobotbear committed on
Commit
8233dd1
·
1 Parent(s): afa4c81

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -4,15 +4,15 @@ from PIL import Image
4
  import pandas as pd
5
  from lavis.models import load_model_and_preprocess
6
  from lavis.processors import load_processor
7
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM # Import AutoTokenizer and AutoModelForSeq2SeqLM
8
 
9
  # Load model and preprocessors for Image-Text Matching (LAVIS)
10
  device = torch.device("cuda") if torch.cuda.is_available() else "cpu"
11
  model_itm, vis_processors, text_processors = load_model_and_preprocess("blip2_image_text_matching", "pretrain", device=device, is_eval=True)
12
 
13
- # Load model and tokenizer for Image Captioning (TextCaps)
14
- model_caption = AutoModelForSeq2SeqLM.from_pretrained("microsoft/git-large-r-textcaps")
15
  tokenizer_caption = AutoTokenizer.from_pretrained("microsoft/git-large-r-textcaps")
 
16
 
17
  # List of statements for Image-Text Matching
18
  statements = [
 
4
  import pandas as pd
5
  from lavis.models import load_model_and_preprocess
6
  from lavis.processors import load_processor
7
+ from transformers import AutoTokenizer, AutoModelForCausalLM
8
 
9
  # Load model and preprocessors for Image-Text Matching (LAVIS)
10
  device = torch.device("cuda") if torch.cuda.is_available() else "cpu"
11
  model_itm, vis_processors, text_processors = load_model_and_preprocess("blip2_image_text_matching", "pretrain", device=device, is_eval=True)
12
 
13
+ # Load tokenizer and model for Image Captioning (TextCaps)
 
14
  tokenizer_caption = AutoTokenizer.from_pretrained("microsoft/git-large-r-textcaps")
15
+ model_caption = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
16
 
17
  # List of statements for Image-Text Matching
18
  statements = [