sms07 committed on
Commit
58391cf
·
1 Parent(s): eb768aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -1
app.py CHANGED
@@ -12,13 +12,15 @@ def main():
12
  st.write("Drag and drop an image file here.")
13
 
14
  # Allow the user to upload an image file
15
- uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
16
 
17
  if uploaded_file is not None:
18
  # Display the uploaded image
19
  image = Image.open(uploaded_file)
20
  st.image(image, caption="Uploaded Image", use_column_width=True)
21
 
 
 
22
  # Model 1.
23
  # Model 1 gets input from the user.
24
  # User -> Model 1
@@ -62,3 +64,98 @@ gpt2_pipeline = pipeline(task="text-generation", model="gpt2")
62
  topic_model_1 = BERTopic.load(path="davanstrien/chat_topics")
63
  topic_model_2 = BERTopic.load(path="MaartenGr/BERTopic_ArXiv")
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  st.write("Drag and drop an image file here.")
13
 
14
  # Allow the user to upload an image file
15
+ image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
16
 
17
  if uploaded_file is not None:
18
  # Display the uploaded image
19
  image = Image.open(uploaded_file)
20
  st.image(image, caption="Uploaded Image", use_column_width=True)
21
 
22
+ question = st.text_input("What's your question?")
23
+
24
  # Model 1.
25
  # Model 1 gets input from the user.
26
  # User -> Model 1
 
64
  topic_model_1 = BERTopic.load(path="davanstrien/chat_topics")
65
  topic_model_2 = BERTopic.load(path="MaartenGr/BERTopic_ArXiv")
66
 
67
+ ###############################################################################
68
+
69
+ # 5 MODEL INFERENCES.
70
+ # User Input = Image + Question About The Image.
71
+ # User -> Model 1 -> Model 2 -> Model 3 -> Model 4 -> Model 5
72
+
73
+ # Model 1.
74
+
75
+ vqa_pipeline_output = vqa_pipeline(image, question, top_k=5)[0]
76
+
77
+ # Model 2.
78
+
79
+ text = (
80
+ "I love "
81
+ + str(vqa_pipeline_output["answer"])
82
+ + " and I would like to know how to [MASK]."
83
+ )
84
+ bbu_pipeline_output = bbu_pipeline(text)
85
+
86
+ # Model 3.
87
+
88
+ utterance = bbu_pipeline_output[0]["sequence"]
89
+ inputs = tokenizer(utterance, return_tensors="pt")
90
+ result = facebook_model.generate(**inputs)
91
+ facebook_model_output = tokenizer.decode(result[0])
92
+
93
+ # Model 4.
94
+
95
+ facebook_model_output = facebook_model_output.replace("<s> ", "")
96
+ facebook_model_output = facebook_model_output.replace("<s>", "")
97
+ facebook_model_output = facebook_model_output.replace("</s>", "")
98
+ gpt2_pipeline_output = gpt2_pipeline(facebook_model_output)[0]["generated_text"]
99
+
100
+ # Model 5.
101
+
102
+ topic, prob = topic_model_1.transform(gpt2_pipeline_output)
103
+ topic_model_1_output = topic_model_1.get_topic_info(topic[0])["Representation"][
104
+ 0
105
+ ]
106
+
107
+ topic, prob = topic_model_2.transform(gpt2_pipeline_output)
108
+ topic_model_2_output = topic_model_2.get_topic_info(topic[0])["Representation"][
109
+ 0
110
+ ]
111
+ ###############################################################################
112
+
113
+ print()
114
+
115
+ print("-" * 150)
116
+ print("vqa_pipeline_output = ", vqa_pipeline_output)
117
+ print("bbu_pipeline_output =", bbu_pipeline_output)
118
+ print("facebook_model_output =", facebook_model_output)
119
+ print("gpt2_pipeline_output =", gpt2_pipeline_output)
120
+ print("topic_model_1_output =", topic_model_1_output)
121
+ print("topic_model_2_output =", topic_model_2_output)
122
+
123
+ print()
124
+
125
+ print("-" * 150)
126
+ print("SUMMARY")
127
+ print("-" * 7)
128
+ print("Your Image:", image)
129
+ print("Your Question:", question)
130
+ print("-" * 100)
131
+ print(
132
+ "1. Highest Predicted Answer For Your Question:",
133
+ vqa_pipeline_output["answer"],
134
+ "\n",
135
+ )
136
+ print(text)
137
+ print(
138
+ "2. Highest Predicted Sequence On [MASK] Based on 1.:",
139
+ bbu_pipeline_output[0]["sequence"],
140
+ "\n",
141
+ )
142
+ print(
143
+ "3. Conversation Based On Previous Answer Based on 2.:",
144
+ facebook_model_output,
145
+ "\n",
146
+ )
147
+ print(
148
+ "4. Text Generated Based On Previous Answer Based on 3.:",
149
+ gpt2_pipeline_output,
150
+ "\n",
151
+ )
152
+ print(
153
+ "5. Highest Predicted Topic Model_1 For Previous The Answer Based on 4.:",
154
+ topic_model_1_output,
155
+ "\n",
156
+ )
157
+ print(
158
+ "6. Highest Predicted Topic Model_2 For Previous The Answer Based on 4.:",
159
+ topic_model_2_output,
160
+ )
161
+ print("-" * 150)