Kingston Yip committed
Commit 664aa80 · 1 Parent(s): f3c89dc

removed cache

Files changed (1)
  1. app.py +3 -12
app.py CHANGED
@@ -9,25 +9,16 @@ from random import randint
 def predict_cyberbullying_probability(sentence, tokenizer, model):
     # Preprocess the input sentence
     inputs = tokenizer(sentence, padding='max_length', return_token_type_ids=False, return_attention_mask=True, truncation=True, max_length=512, return_tensors='pt')
-
-    print("==========")
-    print(inputs)
-    print("==========")
 
-    attention_mask = inputs['attention_mask'].flatten()
-    print("==========")
-    print(attention_mask)
-    inputs = inputs['input_ids'].flatten()
-    # print("\n\ninputs\n\n", inputs)
-    # Disable gradient computation
+    attention_mask = inputs['attention_mask']
+    inputs = inputs['input_ids']
+
     with torch.no_grad():
         # Forward pass
         outputs = model(inputs, attention_mask=attention_mask)
 
     probs = torch.sigmoid(outputs.logits.unsqueeze(1).flatten())
 
-
-
     res = probs.numpy().tolist()
     return res
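With the .flatten() calls gone, input_ids and attention_mask stay at shape (1, 512), the (batch, seq_len) layout that Hugging Face models expect for a single input, and the debug prints are dropped. Below is a minimal sketch of how the updated function might be exercised, assuming transformers is installed; "bert-base-uncased" is a stand-in checkpoint for illustration, since the Space presumably loads its own fine-tuned cyberbullying classifier.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification


def predict_cyberbullying_probability(sentence, tokenizer, model):
    # Preprocess the input sentence; tensors come back batched as (1, 512)
    inputs = tokenizer(sentence, padding='max_length', return_token_type_ids=False,
                       return_attention_mask=True, truncation=True, max_length=512,
                       return_tensors='pt')

    attention_mask = inputs['attention_mask']
    inputs = inputs['input_ids']

    with torch.no_grad():
        # Forward pass
        outputs = model(inputs, attention_mask=attention_mask)

    # logits have shape (1, num_labels); flatten to one sigmoid score per label
    probs = torch.sigmoid(outputs.logits.unsqueeze(1).flatten())

    res = probs.numpy().tolist()
    return res


# Stand-in checkpoint; the Space's actual model is assumed to be a
# fine-tuned multi-label cyberbullying classifier.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.eval()

print(predict_cyberbullying_probability("example sentence", tokenizer, model))

Because the logits for a single-example batch flatten to one vector, the returned list holds one independent sigmoid probability per label, consistent with a multi-label setup.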