mshukor committed on
Commit
402d77e
1 Parent(s): 0794d2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -42
app.py CHANGED
@@ -64,39 +64,39 @@ use_fp16 = False
64
 
65
  checkpoint_path = 'checkpoints/unival_s2_hs/checkpoint1.pt'
66
 
67
- # Load ckpt & config for Image Captioning
68
- caption_overrides={"eval_cider":False, "beam":5, "max_len_b":22, "no_repeat_ngram_size":3, "seed":7, "unnormalized": False,
69
- "bpe_dir":"utils/BPE", "video_model_path": None,}
70
-
71
- caption_models, caption_cfg, caption_task = checkpoint_utils.load_model_ensemble_and_task(
72
- utils.split_paths(checkpoint_path),
73
- arg_overrides=caption_overrides
74
- )
75
-
76
- # Load ckpt & config for Refcoco
77
- refcoco_overrides = {"bpe_dir":"utils/BPE", "video_model_path": None}
78
-
79
- refcoco_models, refcoco_cfg, refcoco_task = checkpoint_utils.load_model_ensemble_and_task(
80
- utils.split_paths(checkpoint_path),
81
- arg_overrides=refcoco_overrides
82
- )
83
- refcoco_cfg.common.seed = 7
84
- refcoco_cfg.generation.beam = 5
85
- refcoco_cfg.generation.min_len = 4
86
- refcoco_cfg.generation.max_len_a = 0
87
- refcoco_cfg.generation.max_len_b = 4
88
- refcoco_cfg.generation.no_repeat_ngram_size = 3
89
-
90
- # Load pretrained ckpt & config for VQA
91
- parser = options.get_generation_parser()
92
- input_args = ["", "--task=vqa_gen", "--beam=100", "--unnormalized", f"--path={checkpoint_path}", "--bpe-dir=utils/BPE"]
93
- args = options.parse_args_and_arch(parser, input_args)
94
- vqa_cfg = convert_namespace_to_omegaconf(args)
95
- vqa_task = tasks.setup_task(vqa_cfg.task)
96
- vqa_models, vqa_cfg = checkpoint_utils.load_model_ensemble(
97
- utils.split_paths(vqa_cfg.common_eval.path),
98
- task=vqa_task
99
- )
100
 
101
  # Load pretrained ckpt & config for Generic Interface
102
  parser = options.get_generation_parser()
@@ -110,17 +110,17 @@ general_models, general_cfg = checkpoint_utils.load_model_ensemble(
110
  )
111
 
112
  # move models to gpu
113
- move2gpu(caption_models, caption_cfg)
114
- move2gpu(refcoco_models, refcoco_cfg)
115
- move2gpu(vqa_models, vqa_cfg)
116
  move2gpu(general_models, general_cfg)
117
 
118
- # Initialize generator
119
- caption_generator = caption_task.build_generator(caption_models, caption_cfg.generation)
120
- refcoco_generator = refcoco_task.build_generator(refcoco_models, refcoco_cfg.generation)
121
- vqa_generator = vqa_task.build_generator(vqa_models, vqa_cfg.generation)
122
- vqa_generator.zero_shot = True
123
- vqa_generator.constraint_trie = None
124
  general_generator = general_task.build_generator(general_models, general_cfg.generation)
125
 
126
  # Construct image transforms
 
64
 
65
  checkpoint_path = 'checkpoints/unival_s2_hs/checkpoint1.pt'
66
 
67
+ # # Load ckpt & config for Image Captioning
68
+ # caption_overrides={"eval_cider":False, "beam":5, "max_len_b":22, "no_repeat_ngram_size":3, "seed":7, "unnormalized": False,
69
+ # "bpe_dir":"utils/BPE", "video_model_path": None,}
70
+
71
+ # caption_models, caption_cfg, caption_task = checkpoint_utils.load_model_ensemble_and_task(
72
+ # utils.split_paths(checkpoint_path),
73
+ # arg_overrides=caption_overrides
74
+ # )
75
+
76
+ # # Load ckpt & config for Refcoco
77
+ # refcoco_overrides = {"bpe_dir":"utils/BPE", "video_model_path": None}
78
+
79
+ # refcoco_models, refcoco_cfg, refcoco_task = checkpoint_utils.load_model_ensemble_and_task(
80
+ # utils.split_paths(checkpoint_path),
81
+ # arg_overrides=refcoco_overrides
82
+ # )
83
+ # refcoco_cfg.common.seed = 7
84
+ # refcoco_cfg.generation.beam = 5
85
+ # refcoco_cfg.generation.min_len = 4
86
+ # refcoco_cfg.generation.max_len_a = 0
87
+ # refcoco_cfg.generation.max_len_b = 4
88
+ # refcoco_cfg.generation.no_repeat_ngram_size = 3
89
+
90
+ # # Load pretrained ckpt & config for VQA
91
+ # parser = options.get_generation_parser()
92
+ # input_args = ["", "--task=vqa_gen", "--beam=100", "--unnormalized", f"--path={checkpoint_path}", "--bpe-dir=utils/BPE"]
93
+ # args = options.parse_args_and_arch(parser, input_args)
94
+ # vqa_cfg = convert_namespace_to_omegaconf(args)
95
+ # vqa_task = tasks.setup_task(vqa_cfg.task)
96
+ # vqa_models, vqa_cfg = checkpoint_utils.load_model_ensemble(
97
+ # utils.split_paths(vqa_cfg.common_eval.path),
98
+ # task=vqa_task
99
+ # )
100
 
101
  # Load pretrained ckpt & config for Generic Interface
102
  parser = options.get_generation_parser()
 
110
  )
111
 
112
  # move models to gpu
113
+ # move2gpu(caption_models, caption_cfg)
114
+ # move2gpu(refcoco_models, refcoco_cfg)
115
+ # move2gpu(vqa_models, vqa_cfg)
116
  move2gpu(general_models, general_cfg)
117
 
118
+ # # Initialize generator
119
+ # caption_generator = caption_task.build_generator(caption_models, caption_cfg.generation)
120
+ # refcoco_generator = refcoco_task.build_generator(refcoco_models, refcoco_cfg.generation)
121
+ # vqa_generator = vqa_task.build_generator(vqa_models, vqa_cfg.generation)
122
+ # vqa_generator.zero_shot = True
123
+ # vqa_generator.constraint_trie = None
124
  general_generator = general_task.build_generator(general_models, general_cfg.generation)
125
 
126
  # Construct image transforms