Joshua1808 committed on
Commit
f6ee10a
1 Parent(s): edc6b13

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -41
app.py CHANGED
@@ -128,46 +128,45 @@ def run():
128
 
129
  elif (usuario):
130
  tweets = api.user_timeline(screen_name = search_words,count=number_of_tweets)
131
-
132
- tweet_list = [i.text for i in tweets]
133
- text= pd.DataFrame(tweet_list)
134
- text[0] = text[0].apply(preprocess_tweet)
135
- text1=text[0].values
136
- indices1=tokenizer.batch_encode_plus(text1.tolist(),max_length=128,add_special_tokens=True, return_attention_mask=True,pad_to_max_length=True,truncation=True)
137
- input_ids1=indices1["input_ids"]
138
- attention_masks1=indices1["attention_mask"]
139
- prediction_inputs1= torch.tensor(input_ids1)
140
- prediction_masks1 = torch.tensor(attention_masks1)
141
- # Set the batch size.
142
- batch_size = 25
143
- # Create the DataLoader.
144
- prediction_data1 = TensorDataset(prediction_inputs1, prediction_masks1)
145
- prediction_sampler1 = SequentialSampler(prediction_data1)
146
- prediction_dataloader1 = DataLoader(prediction_data1, sampler=prediction_sampler1, batch_size=batch_size)
147
- print('Predicting labels for {:,} test sentences...'.format(len(prediction_inputs1)))
148
- # Pone el modelo en modo evaluaci贸n
149
- model.eval()
150
- # Variables de Seguimiento
151
- predictions = []
152
- # Predict
153
- for batch in prediction_dataloader1:
154
- batch = tuple(t.to(device) for t in batch)
155
- # Descomprimir las entradas de nuestro cargador de datos
156
- b_input_ids1, b_input_mask1 = batch
157
- # Decirle al modelo que no calcule ni almacene gradientes, ahorrando memoria y # acelerando la predicci贸n.
158
- with torch.no_grad():
159
- # Forward pass, calculate logit predictions
160
- outputs1 = model(b_input_ids1, token_type_ids=None,attention_mask=b_input_mask1)
161
- logits1 = outputs1[0]
162
- # Move logits and labels to CPU
163
- logits1 = logits1.detach().cpu().numpy()
164
- # Store predictions and true labels
165
- predictions.append(logits1)
166
- flat_predictions = [item for sublist in predictions for item in sublist]
167
- flat_predictions = np.argmax(flat_predictions, axis=1).flatten()#p = [i for i in classifier(tweet_list)]
168
- df = pd.DataFrame(list(zip(tweet_list, flat_predictions)),columns =['脷ltimos '+ str(number_of_tweets)+' Tweets'+' de '+search_words, 'Sexista'])
169
- df['Sexista']= np.where(df['Sexista']== 0, 'No Sexistas', 'Sexistas')
170
-
171
- st.table(df.reset_index(drop=True).head(20).style.applymap(color_survived, subset=['Sexista']))
172
 
173
  run()
 
128
 
129
  elif (usuario):
130
  tweets = api.user_timeline(screen_name = search_words,count=number_of_tweets)
131
+ tweet_list = [i.text for i in tweets]
132
+ text= pd.DataFrame(tweet_list)
133
+ text[0] = text[0].apply(preprocess_tweet)
134
+ text1=text[0].values
135
+ indices1=tokenizer.batch_encode_plus(text1.tolist(),max_length=128,add_special_tokens=True, return_attention_mask=True,pad_to_max_length=True,truncation=True)
136
+ input_ids1=indices1["input_ids"]
137
+ attention_masks1=indices1["attention_mask"]
138
+ prediction_inputs1= torch.tensor(input_ids1)
139
+ prediction_masks1 = torch.tensor(attention_masks1)
140
+ # Set the batch size.
141
+ batch_size = 25
142
+ # Create the DataLoader.
143
+ prediction_data1 = TensorDataset(prediction_inputs1, prediction_masks1)
144
+ prediction_sampler1 = SequentialSampler(prediction_data1)
145
+ prediction_dataloader1 = DataLoader(prediction_data1, sampler=prediction_sampler1, batch_size=batch_size)
146
+ print('Predicting labels for {:,} test sentences...'.format(len(prediction_inputs1)))
147
+ # Pone el modelo en modo evaluaci贸n
148
+ model.eval()
149
+ # Variables de Seguimiento
150
+ predictions = []
151
+ # Predict
152
+ for batch in prediction_dataloader1:
153
+ batch = tuple(t.to(device) for t in batch)
154
+ # Descomprimir las entradas de nuestro cargador de datos
155
+ b_input_ids1, b_input_mask1 = batch
156
+ # Decirle al modelo que no calcule ni almacene gradientes, ahorrando memoria y # acelerando la predicci贸n.
157
+ with torch.no_grad():
158
+ # Forward pass, calculate logit predictions
159
+ outputs1 = model(b_input_ids1, token_type_ids=None,attention_mask=b_input_mask1)
160
+ logits1 = outputs1[0]
161
+ # Move logits and labels to CPU
162
+ logits1 = logits1.detach().cpu().numpy()
163
+ # Store predictions and true labels
164
+ predictions.append(logits1)
165
+ flat_predictions = [item for sublist in predictions for item in sublist]
166
+ flat_predictions = np.argmax(flat_predictions, axis=1).flatten()#p = [i for i in classifier(tweet_list)]
167
+ df = pd.DataFrame(list(zip(tweet_list, flat_predictions)),columns =['脷ltimos '+ str(number_of_tweets)+' Tweets'+' de '+search_words, 'Sexista'])
168
+ df['Sexista']= np.where(df['Sexista']== 0, 'No Sexistas', 'Sexistas')
169
+
170
+ st.table(df.reset_index(drop=True).head(20).style.applymap(color_survived, subset=['Sexista']))
 
171
 
172
  run()