krishnapal2308 committed
Commit 00eaff9
1 Parent(s): 090ccb6

gradio's cache_example = False

Files changed (1)
  1. app.py +13 -13
app.py CHANGED
@@ -12,13 +12,13 @@ warnings.filterwarnings('ignore')
 def process_image_and_generate_output(image, model_selection):
     if image is None:
         return "Please select an image", None
-
-    if model_selection == ('Basic Model (Trained only for 15 epochs without any hyperparameter tuning, utilizing '
-                           'inception v3)'):
+    # (Trained only for 15 epochs without any hyperparameter tuning, utilizing inception v3)'
+    # (SOTA model for Image captioning)
+    if model_selection == ('Basic Model'):
         result = inference_script.evaluate(image)
         pred_caption = ' '.join(result).rsplit(' ', 1)[0]
         pred_caption = pred_caption.replace('<unk>', '')
-    elif model_selection == 'ViT-GPT2 (SOTA model for Image captioning)':
+    elif model_selection == 'ViT-GPT2':
         result = vit_gpt2.predict_step(image)
         pred_caption = result[0]
     else:
@@ -40,12 +40,12 @@ def process_image_and_generate_output(image, model_selection):
 
 
 # sample_images = [
-#     [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg")],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/2.jpg")],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/3.jpg")],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/4.jpg")],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/5.jpg")],
-#     [os.path.join(os.path.dirname(__file__), "sample_images/6.jpg")]
+#     [os.path.join(os.path.dirname(__file__), "sample_images/1.jpg"), 'Basic Model'],
+#     [os.path.join(os.path.dirname(__file__), "sample_images/2.jpg"), 'Basic Model'],
+#     [os.path.join(os.path.dirname(__file__), "sample_images/3.jpg"), 'Basic Model'],
+#     [os.path.join(os.path.dirname(__file__), "sample_images/4.jpg"), "ViT-GPT2"],
+#     [os.path.join(os.path.dirname(__file__), "sample_images/5.jpg"), "ViT-GPT2"],
+#     [os.path.join(os.path.dirname(__file__), "sample_images/6.jpg"), "ViT-GPT2"]
 # ]
 
 sample_images = [
@@ -62,15 +62,15 @@ sample_images = [
 image_input = gr.Image(label="Upload Image")
 
 # Create a dropdown to choose the model
-model_selection_input = gr.Radio(["Basic Model (Trained only for 15 epochs without any hyperparameter "
-                                  "tuning, utilizing inception v3)",
-                                  "ViT-GPT2 (SOTA model for Image captioning)"],
+model_selection_input = gr.Radio(["Basic Model",
+                                  "ViT-GPT2"],
                                  label="Choose Model")
 
 iface = gr.Interface(fn=process_image_and_generate_output,
                      inputs=[image_input, model_selection_input],
                      outputs=["text", "audio"],
                      examples=sample_images,
+                     cache_examples=False,
                      allow_flagging='never',
                      title="Eye For Blind | Image Captioning & TTS",
                      description="To be added")
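
For context, here is a minimal sketch of the pattern this commit lands on: short Radio labels that exactly match the strings compared in the handler, example rows that supply both inputs, and cache_examples=False so Gradio skips running the captioning models at launch to pre-render example outputs. The caption function, its return strings, and the example paths below are hypothetical stand-ins for the repository's actual code, assuming the Gradio 3.x-style Interface API used in app.py.

import gradio as gr


def caption(image, model_selection):
    # Stand-in for process_image_and_generate_output: the comparisons
    # must match the Radio choices exactly.
    if image is None:
        return "Please select an image"
    if model_selection == 'Basic Model':
        return "caption from the basic InceptionV3 model"  # placeholder output
    elif model_selection == 'ViT-GPT2':
        return "caption from ViT-GPT2"  # placeholder output
    return "Unknown model"


demo = gr.Interface(fn=caption,
                    inputs=[gr.Image(label="Upload Image"),
                            gr.Radio(["Basic Model", "ViT-GPT2"],
                                     label="Choose Model")],
                    outputs="text",
                    # Each example row supplies every input: image path plus model choice.
                    examples=[["sample_images/1.jpg", "Basic Model"],
                              ["sample_images/4.jpg", "ViT-GPT2"]],
                    # The point of the commit: do not execute the models at
                    # startup to pre-compute outputs for the example rows.
                    cache_examples=False,
                    allow_flagging='never')

if __name__ == "__main__":
    demo.launch()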