DanielSc4 committed
Commit 6534dfb
Parent: 2180e70

now working

Files changed (1): app.py (+77 -24)
app.py CHANGED
@@ -56,7 +56,7 @@ def get_lda(n_components):

    print('[x] Init LDA model')
    lda_model = LatentDirichletAllocation(
-       n_components=5,
+       n_components=n_components,
        max_iter=10,
        learning_method='online',
        random_state=100,
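
The only change in this hunk wires the slider value through to the model instead of a hard-coded 5. For reference, a minimal sketch of the surrounding scikit-learn pattern, with an invented toy corpus standing in for the app's data_vectorized:

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

docs = ["cats purr and nap", "dogs bark and fetch", "planes fly very high"]
vectorizer = CountVectorizer()
data_vectorized = vectorizer.fit_transform(docs)    # document-term matrix

lda_model = LatentDirichletAllocation(
    n_components=2,             # the value the slider now controls
    max_iter=10,
    learning_method='online',
    random_state=100,
)
doc_topic = lda_model.fit_transform(data_vectorized)    # shape (n_docs, n_components)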
@@ -65,7 +65,7 @@ def get_lda(n_components):
        n_jobs = -1,
        verbose=1,
    )
-
+
    print('[x] Fitting LDA model')
    lda_output = lda_model.fit_transform(data_vectorized)
    print(lda_model)    # Model attributes
@@ -87,13 +87,16 @@ def get_lda(n_components):
    print('[x] Getting LDA output')
    lda_output = best_lda_model.transform(data_vectorized)

+   print('[x] Assigning topics')
    topicnames = ["Topic" + str(i) for i in range(best_lda_model.n_components)]
    docnames = ["Doc" + str(i) for i in range(len(data))]
    df_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames, index=docnames)

+   print('[x] Checking dominant topics')
    dominant_topic = np.argmax(df_document_topic.values, axis=1)
    df_document_topic["dominant_topic"] = dominant_topic

+
    # Topic-Keyword Matrix
    df_topic_keywords = pd.DataFrame(best_lda_model.components_)
    df_topic_keywords
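
The new log lines bracket a dominant-topic step that is just a row-wise argmax over the document-topic matrix. A toy illustration with a made-up 3x2 lda_output:

import numpy as np
import pandas as pd

lda_output = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])    # invented probabilities
df_document_topic = pd.DataFrame(np.round(lda_output, 2),
                                 columns=["Topic0", "Topic1"],
                                 index=["Doc0", "Doc1", "Doc2"])
df_document_topic["dominant_topic"] = np.argmax(df_document_topic.values, axis=1)
# Doc0 -> 0, Doc1 -> 1, Doc2 -> 0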
@@ -101,6 +104,7 @@ def get_lda(n_components):
    df_topic_keywords.columns = vectorizer.get_feature_names_out()
    df_topic_keywords.index = topicnames

+   print('[x] Computing word-topic association')
    # Show top n keywords for each topic
    def show_topics(vectorizer=vectorizer, lda_model=lda_model, n_words=20):
        keywords = np.array(vectorizer.get_feature_names_out())
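
Only the first line of show_topics is visible in this hunk. A common way to finish that "top n keywords per topic" idea is to rank each row of components_ and index into the vocabulary; a sketch of one plausible body (the real one may differ):

import numpy as np

def show_topics_sketch(vectorizer, lda_model, n_words=20):
    # Each row of components_ holds one topic's weight for every vocabulary word.
    keywords = np.array(vectorizer.get_feature_names_out())
    return [keywords[row.argsort()[::-1][:n_words]] for row in lda_model.components_]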
@@ -122,6 +126,7 @@ def get_lda(n_components):
    df_topic_keywords["Topics"] = topics
    df_topic_keywords

+   print('[x] Predicting dominant topic for each document')
    # Define function to predict topic for a given text document.
    def predict_topic(text, nlp=nlp):
        global sent_to_words
@@ -142,9 +147,9 @@ def get_lda(n_components):
        #topic_guess = df_topic_keywords.iloc[np.argmax(topic_probability_scores), Topics]
        return infer_topic, topic, topic_probability_scores

-   # Predict the topic
-   mytext = ["This is a test of a random topic where I talk about politics"]
-   infer_topic, topic, prob_scores = predict_topic(text = mytext, nlp=nlp)
+   # # Predict the topic
+   # mytext = ["This is a test of a random topic where I talk about politics"]
+   # infer_topic, topic, prob_scores = predict_topic(text = mytext, nlp=nlp)

    def apply_predict_topic(text):
        text = [text]
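
The smoke test is commented out rather than deleted. The core of predict_topic (whose body lies outside this diff) is presumably vectorize-then-transform plus an argmax; continuing the toy objects (vectorizer, lda_model) from the first sketch:

import numpy as np

scores = lda_model.transform(vectorizer.transform(["dogs fetch sticks"]))
print(int(np.argmax(scores)))    # index of the inferred dominant topic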
@@ -153,16 +158,60 @@ def get_lda(n_components):

    df["Topic_key_word"] = df['comment'].apply(apply_predict_topic)

+   print('[x] Generating plot [1]')
+   print('Percentage of ironic comments per topic')
+   perc_topic_irony = {}
+   for t in topics:
+       total_0label = sum((df.label == 1) & (df.Topic_key_word == t))
+       if total_0label != 0:
+           total_X_topic = df.Topic_key_word.value_counts()[t]
+       else:
+           total_0label, total_X_topic = 0, 0.001    # topic absent from the dataset (avoid division by zero)
+       perc_topic_irony[t] = total_0label / total_X_topic
+       print(f'{t} w/ label 1: {total_0label}/{total_X_topic} ({total_0label / total_X_topic * 100 :.2f}%)')
+
+   fig1, ax = plt.subplots(figsize = (10, 7))
+   bottom = np.zeros(len(perc_topic_irony))
+   width = 0.9

-   # plot
-   subreddits = df.subreddit.value_counts().index[:22]
+   ax.bar(perc_topic_irony.keys(), perc_topic_irony.values(), width, label = 'sarcastic')
+   comp = list(map(lambda x: 1 - x if x > 0 else 0, perc_topic_irony.values()))
+   ax.bar(perc_topic_irony.keys(), comp, width, bottom=list(perc_topic_irony.values()), label = 'not sarcastic')

-   weight_counts = {
-       t: [
-           df[df.Topic_key_word == t].subreddit.value_counts()[subreddit] / df.subreddit.value_counts()[subreddit] for subreddit in subreddits
-       ] for t in topics
-   }
+   ax.set_title("% of sarcastic comments for each topic")
+   plt.xticks(rotation=70)
+   plt.legend()
+   plt.axhline(0.5, color = 'red', ls=":")
+
+   # Should this be a parameter?
+   # Max number of biggest subreddits to analyse
+   n_top_subreddit_to_analyse = 20
+
+   # probably not necessary (?); drop eventually if the logs get too cluttered
+   print('Percentage of each topic for each subreddit')
+   weight_counts = {}
+   for t in topics:
+       weight_counts[t] = []
+       for subreddit in df['subreddit'].value_counts().index[:n_top_subreddit_to_analyse]:    # biggest n_top_subreddit_to_analyse subreddits
+           if sum(df[df.Topic_key_word == t].subreddit == subreddit) > 0:    # if topic t occurs in this subreddit (at least one row in the df)
+               perc_sub = df[df.Topic_key_word == t]['subreddit'].value_counts()[subreddit] / df['subreddit'].value_counts()[subreddit]
+           else:
+               perc_sub = 0
+           weight_counts[t].append(perc_sub)
+           print(f'Perc of topic {t} in subreddit {subreddit}: {perc_sub * 100:.2f}')
+       print()
+
+
+   print('[x] Generating plot [2]')
+   # plot
+   subreddits = list(df.subreddit.value_counts().index)[:n_top_subreddit_to_analyse]

+   # weight_counts = {
+   #     t: [
+   #         df[df.Topic_key_word == t].subreddit.value_counts()[subreddit] / df.subreddit.value_counts()[subreddit] for subreddit in subreddits
+   #     ] for t in topics
+   #     }
+
    irony_percs = {
        t: [
            len(
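
Two notes on the new plot-1 loop. The guard tests the numerator rather than the denominator; that works because a zero numerator forces the ratio to 0 anyway, and the 0.001 denominator only prevents a division by zero (and a value_counts KeyError) for topics that never occur. Also, assuming df.label is a 0/1 irony flag, which the sum((df.label == 1) & ...) expression suggests, the loop collapses for topics present in df to:

# Equivalent to the plot-1 loop for topics that occur in df, assuming df.label is 0/1:
perc_topic_irony = df.groupby('Topic_key_word')['label'].mean().to_dict()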
@@ -175,7 +224,7 @@ def get_lda(n_components):
    }
    width = 0.9

-   fig, ax = plt.subplots(figsize = (10, 7))
+   fig2, ax = plt.subplots(figsize = (10, 7))
    plt.axhline(0.5, color = 'red', ls=":", alpha = .3)

    bottom = np.zeros(len(subreddits))
@@ -187,9 +236,11 @@ def get_lda(n_components):

    ax.set_title("Perc of topics for each subreddit")
    ax.legend(loc="upper right")
-   plt.xticks(rotation=70)
+   plt.xticks(rotation=50)
+
+   print('[v] All looking good!')

-   return fig
+   return df_topic_keywords, fig1, fig2


# def main():
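
Both figures use the same stacked-bar idiom: keep a running bottom array and draw each series on top of it. A self-contained rendition on dummy numbers:

import numpy as np
import matplotlib.pyplot as plt

subreddits = ["r/one", "r/two", "r/three"]                              # dummy names
weight_counts = {"Topic0": [0.5, 0.2, 0.4], "Topic1": [0.3, 0.6, 0.1]}  # dummy shares

fig, ax = plt.subplots(figsize=(10, 7))
bottom = np.zeros(len(subreddits))
for topic, weights in weight_counts.items():
    ax.bar(subreddits, weights, 0.9, bottom=bottom, label=topic)
    bottom += np.array(weights)    # stack the next segment on top
ax.legend(loc="upper right")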
@@ -202,18 +253,20 @@ with gr.Blocks() as demo:
    gr.Markdown("### This is a subtitle")
    # gradio.Dataframe(···)

-   n_comp = gr.Slider(2, 25, value=5, step = 1, label="N components", info="Choose the number of components for LDA"),

    btn = gr.Button(value="Submit")
-
-   plot = gr.Plot(label="Plot")

-   btn.click(get_lda, inputs=[n_comp[0]], outputs=[plot])
-
-
-
-
-   # demo.load(main, inputs=[], outputs=[plot])
+   btn.click(
+       get_lda,
+       inputs=[
+           gr.Slider(2, 25, value=5, step = 1, label="N components", info="Choose the number of components for LDA"),
+       ],
+       outputs=[
+           gr.DataFrame(),
+           gr.Plot(label="Plot 1"),
+           gr.Plot(label="Plot 2"),
+       ]
+   )


# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
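
The rewiring also removes a subtle bug: the old n_comp assignment ended with a trailing comma, which made n_comp a one-element tuple and forced the n_comp[0] indexing. Building the components inline in btn.click avoids that, and the three outputs now match get_lda's new return signature. A minimal standalone analogue of the new wiring, with a toy callback and a Textbox standing in for the DataFrame and plots:

import gradio as gr

def run(n_components):
    return f"would fit LDA with {n_components} components"

with gr.Blocks() as demo:
    slider = gr.Slider(2, 25, value=5, step=1, label="N components")
    btn = gr.Button(value="Submit")
    out = gr.Textbox()
    btn.click(run, inputs=[slider], outputs=[out])

demo.launch()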
 