kusumakar committed
Commit d0020d9
1 Parent(s): 90cafd4

Update app.py

Files changed (1)
  1. app.py +2 -5
app.py CHANGED
@@ -1,4 +1,3 @@
-import cv2
 import numpy as np
 from PIL import Image
 import streamlit as st
@@ -11,9 +10,6 @@ extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-capti
 tokeniser = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 
 def generate_captions(image):
-    #image = Image.open(image).convert("RGB
-    image = np.asarray(image)
-    image = cv2.resize(image, (224, 224))
     generated_caption = tokeniser.decode(model.generate(extractor(image, return_tensors="pt").pixel_values.to("cpu"))[0])
     sentence = generated_caption
     text_to_remove = "<|endoftext|>"
@@ -50,9 +46,10 @@ def main():
     if uploaded_file is not None:
         # load the image
         image = Image.open(uploaded_file).convert("RGB")
+        image = image.resize((224, 224))
 
         # context as prompt
-        prompt = generate_captions(uploaded_file)
+        prompt = generate_captions(image)
         st.write("The Context is:", prompt)
 
         # display the image
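For context, below is a minimal sketch of the captioning path after this change. The resized PIL image is passed straight to the feature extractor, so cv2 is no longer needed. The VisionEncoderDecoderModel loading line and the "example.jpg" file name are assumptions (they are not part of this diff); the checkpoint names and the "<|endoftext|>" cleanup mirror app.py.

# Minimal sketch of the updated flow; VisionEncoderDecoderModel and
# "example.jpg" are assumptions, everything else mirrors app.py.
from PIL import Image
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokeniser = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

def generate_captions(image):
    # The feature extractor accepts a PIL image directly and handles normalisation.
    pixel_values = extractor(image, return_tensors="pt").pixel_values.to("cpu")
    caption = tokeniser.decode(model.generate(pixel_values)[0])
    # Drop the GPT-2 end-of-text marker, as app.py does with text_to_remove.
    return caption.replace("<|endoftext|>", "").strip()

image = Image.open("example.jpg").convert("RGB")   # hypothetical test image
image = image.resize((224, 224))                   # matches the new preprocessing step
print(generate_captions(image))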