krishnapal2308 committed
Commit 8692c47
1 Parent(s): 00eaff9

fixed example caching

Files changed (1)
  1. app.py +8 -17
app.py CHANGED
@@ -14,7 +14,7 @@ def process_image_and_generate_output(image, model_selection):
         return "Please select an image", None
     # (Trained only for 15 epochs without any hyperparameter tuning, utilizing inception v3)'
     # (SOTA model for Image captioning)
-    if model_selection == ('Basic Model'):
+    if model_selection == 'Basic Model':
         result = inference_script.evaluate(image)
         pred_caption = ' '.join(result).rsplit(' ', 1)[0]
         pred_caption = pred_caption.replace('<unk>', '')
@@ -39,22 +39,13 @@ def process_image_and_generate_output(image, model_selection):
     return pred_caption, audio_content
 
 
-# sample_images = [
-#     [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg"), 'Basic Model'],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/2.jpg"), 'Basic Model'],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/3.jpg"), 'Basic Model'],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/4.jpg"), "ViT-GPT2"],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/5.jpg"), "ViT-GPT2"],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/6.jpg"), "ViT-GPT2"]
-# ]
-
 sample_images = [
-    ["sample_images/1.jpg"],
-    ["sample_images/2.jpg"],
-    ["sample_images/3.jpg"],
-    ["sample_images/4.jpg"],
-    ["sample_images/5.jpg"],
-    ["sample_images/6.jpg"]
+    [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg"), "ViT-GPT2"],
+    [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg"), 'Basic Model'],
+    [os.path.join(os.path.dirname(__file__), "sample_images/2.jpg"), "ViT-GPT2"],
+    [os.path.join(os.path.dirname(__file__), "sample_images/2.jpg"), 'Basic Model'],
+    [os.path.join(os.path.dirname(__file__), "sample_images/3.jpg"), "ViT-GPT2"],
+    [os.path.join(os.path.dirname(__file__), "sample_images/3.jpg"), 'Basic Model']
 ]
 
 
@@ -70,7 +61,7 @@ iface = gr.Interface(fn=process_image_and_generate_output,
                      inputs=[image_input, model_selection_input],
                      outputs=["text", "audio"],
                      examples=sample_images,
-                     cache_examples=False,
+                     cache_examples=True,
                      allow_flagging='never',
                      title="Eye For Blind | Image Captioning & TTS",
                      description="To be added")
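
Why this fixes caching: with cache_examples=True, Gradio runs the prediction function on every example row at startup and serves the stored outputs, so each row must supply a value for every input component (here, both an image and a model choice), and the example paths must resolve no matter which directory the app is launched from, hence os.path.join(os.path.dirname(__file__), ...). A minimal sketch of the pattern, assuming a stub caption function and gr.Image/gr.Radio components in place of the app's real image_input and model_selection_input (whose definitions are not shown in this diff):

import os
import gradio as gr

def caption(image, model_selection):
    # stand-in for the real captioning + TTS logic
    return f"caption generated by {model_selection}"

# One value per input component; absolute paths so startup caching
# works regardless of the current working directory.
sample_images = [
    [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg"), "ViT-GPT2"],
    [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg"), "Basic Model"],
]

iface = gr.Interface(fn=caption,
                     inputs=[gr.Image(type="pil"),
                             gr.Radio(["Basic Model", "ViT-GPT2"])],
                     outputs="text",
                     examples=sample_images,
                     cache_examples=True,  # run fn on each example at startup, store outputs
                     allow_flagging='never')

if __name__ == "__main__":
    iface.launch()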