chats-bug committed
Commit f3fbfca · 1 Parent(s): d3bbf05

Deactivated git large coco

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -14,8 +14,8 @@ preprocessor_blip_large = AutoProcessor.from_pretrained("Salesforce/blip-image-c
 model_blip_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
 # Load the GIT coco model
-preprocessor_git_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-model_git_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
+# preprocessor_git_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+# model_git_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
 
 # Load the CLIP model
 model_oc_coca, _, transform_oc_coca = open_clip.create_model_and_transforms(
@@ -136,7 +136,7 @@ def generate_captions(
     caption_blip_large = generate_caption(preprocessor_blip_large, model_blip_large, image).strip()
 
     # Generate captions for the image using the GIT coco model
-    caption_git_large_coco = generate_caption(preprocessor_git_large_coco, model_git_large_coco, image).strip()
+    # caption_git_large_coco = generate_caption(preprocessor_git_large_coco, model_git_large_coco, image).strip()
 
     # Generate captions for the image using the CLIP model
     caption_oc_coca = generate_captions_clip(model_oc_coca, transform_oc_coca, image).strip()
@@ -157,7 +157,7 @@ iface = gr.Interface(
     outputs=[
         gr.outputs.Textbox(label="Blip base"),
         gr.outputs.Textbox(label="Blip large"),
-        gr.outputs.Textbox(label="GIT large coco"),
+        # gr.outputs.Textbox(label="GIT large coco"),
         gr.outputs.Textbox(label="CLIP"),
     ],
     title="Image Captioning",
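For reference, the diff calls a generate_caption(processor, model, image) helper whose body is not part of this change. Below is a minimal sketch of what such a helper typically looks like for the BLIP and GIT captioning models, assuming the standard transformers preprocess/generate/decode flow; the function body and generation parameters here are assumptions, not the app's actual code.

import torch
from PIL import Image

def generate_caption(preprocessor, model, image: Image.Image) -> str:
    # Assumed sketch: turn the PIL image into model inputs (pixel values),
    # run autoregressive generation, then decode the caption text.
    inputs = preprocessor(images=image, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=50)
    return preprocessor.batch_decode(output_ids, skip_special_tokens=True)[0]

With a helper shaped like this, commenting out the GIT large coco preprocessor/model pair, its caption call, and its output Textbox (as this commit does) removes the model end to end while leaving the BLIP and CLIP paths untouched.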