nishantguvvada committed • Commit efb91fe • 1 Parent(s): 2f89c4d
Update app.py
app.py
CHANGED
@@ -1,4 +1,5 @@
 import streamlit as st
+import pickle
 import tensorflow as tf
 import cv2
 import numpy as np
@@ -64,15 +65,13 @@ def standardize(inputs):
 )

 # Choose the most frequent words from the vocabulary & remove punctuation etc.
-
-
-
-    output_sequence_length=MAX_CAPTION_LEN,
-)
+file = open('./tokenizer.txt', 'rb')
+tokenizer = pickle.load(file)
+

 # Lookup table: Word -> Index
 word_to_index = StringLookup(
-    mask_token="", vocabulary=tokenizer
+    mask_token="", vocabulary=tokenizer
 )

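In effect, this hunk stops building the tokenizer inside the app (the removed block ending in output_sequence_length=MAX_CAPTION_LEN looks like a text-vectorization layer constructed at startup) and instead unpickles a prebuilt vocabulary from ./tokenizer.txt, which is then handed to StringLookup. A minimal sketch of that round trip, assuming tokenizer.txt holds a pickled Python list of vocabulary strings; the save side and the index_to_word table are assumptions, not part of the diff:

import pickle
from tensorflow.keras.layers import StringLookup

# Assumed save side (training time): persist the vocabulary as a plain list.
# tokenizer_layer is a hypothetical trained vectorization layer.
# with open('./tokenizer.txt', 'wb') as f:
#     pickle.dump(tokenizer_layer.get_vocabulary(), f)

# Load side, mirroring the diff; a context manager also closes the file,
# which the diff's bare open() never does.
with open('./tokenizer.txt', 'rb') as f:
    tokenizer = pickle.load(f)  # expected: a list of vocabulary strings

# Word -> index, exactly as in the diff.
word_to_index = StringLookup(mask_token="", vocabulary=tokenizer)

# Hypothetical inverse table (not in the diff): index -> word through Keras
# rather than plain list indexing.
index_to_word = StringLookup(mask_token="", vocabulary=tokenizer, invert=True)

Whether plain list indexing (tokenizer[predicted_id] in the next hunk) agrees with StringLookup's integer ids depends on whether the pickled list already carries the mask and OOV entries at the front, so that alignment is worth verifying against the saved file.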
@@ -102,7 +101,7 @@ def predict_caption(file):
     chosen_id = tf.random.categorical([top_probs], 1)[0].numpy()
     predicted_id = top_idxs.numpy()[chosen_id][0]

-    result.append(tokenizer
+    result.append(tokenizer[predicted_id])

     if predicted_id == word_to_index("<end>"):
         return img, result
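The sampling step in the last hunk is easier to see in isolation: take the model's top-k candidates, draw one id with tf.random.categorical, and map it back to a word by indexing the vocabulary list, exactly as the new line does. Below is a self-contained sketch under toy inputs; top_probs and top_idxs are the only names taken from the diff. One caveat: tf.random.categorical treats its input as unnormalized log-probabilities, so the sketch logs the probabilities first, whereas the diff passes them raw (that still samples, just from a flatter distribution than the true one).

import tensorflow as tf

# Toy stand-ins for the app's vocabulary and the model's softmax output.
tokenizer = ["", "[UNK]", "<start>", "<end>", "a", "cat", "sat", "mat"]
probs = tf.constant([0.01, 0.02, 0.02, 0.05, 0.25, 0.40, 0.15, 0.10])

# Keep the five most likely candidates.
top_probs, top_idxs = tf.math.top_k(probs, k=5)

# Draw one candidate; log() makes the draw proportional to the probabilities.
chosen_id = tf.random.categorical([tf.math.log(top_probs)], 1)[0].numpy()
predicted_id = top_idxs.numpy()[chosen_id][0]

print(tokenizer[predicted_id])  # most often "cat" with these toy numbers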