camillebrl committed
Commit 5518620 · verified · 1 Parent(s): 43cc0bf

Update tasks/text.py

Files changed (1)
  1. tasks/text.py +5 -2
tasks/text.py CHANGED
@@ -66,14 +66,14 @@ async def evaluate_text(request: TextEvaluationRequest):
         "camillebrl/ModernBERT-envclaims-overfit",
         device="cpu"
     )
-
+    print("len dataset : ", len(test_dataset["quote"]))
     predictions = []
     for batch in range(0, len(test_dataset["quote"]), 32):  # Adjust the batch size
         batch_quotes = test_dataset["quote"][batch:batch + 32]
         batch_predictions = classifier(batch_quotes)
         predictions.extend([label2id[pred["label"]] for pred in batch_predictions])
         print(predictions)
-
+    print("final predictions : ", predictions)
     #--------------------------------------------------------------------------------------------
     # YOUR MODEL INFERENCE STOPS HERE
     #--------------------------------------------------------------------------------------------
@@ -84,6 +84,7 @@ async def evaluate_text(request: TextEvaluationRequest):
 
     # Calculate accuracy
     accuracy = accuracy_score(true_labels, predictions)
+    print("accuracy : ", accuracy)
 
     # Prepare results dictionary
     results = {
@@ -102,5 +103,7 @@ async def evaluate_text(request: TextEvaluationRequest):
         "test_seed": request.test_seed
         }
     }
+
+    print("results : ", results)
 
     return results
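
For readers without the surrounding file, the loop this commit instruments is a standard batched pass over a transformers text-classification pipeline. Below is a minimal, self-contained sketch of that pattern, assuming the label2id mapping and the test split that tasks/text.py builds elsewhere; the toy dataset, labels, and mapping here are hypothetical stand-ins, not the project's real data.

    # Minimal sketch of the batched-inference pattern shown in the diff.
    # Assumptions: label2id and test_dataset come from the surrounding
    # tasks/text.py; the toy values below are hypothetical placeholders.
    from transformers import pipeline
    from sklearn.metrics import accuracy_score

    classifier = pipeline(
        "text-classification",
        model="camillebrl/ModernBERT-envclaims-overfit",
        device="cpu",
    )

    label2id = {"LABEL_0": 0, "LABEL_1": 1}           # hypothetical mapping
    test_dataset = {"quote": ["quote A", "quote B"]}  # hypothetical test split
    true_labels = [0, 1]                              # hypothetical ground truth

    predictions = []
    for start in range(0, len(test_dataset["quote"]), 32):  # batches of 32, as in the diff
        batch_quotes = test_dataset["quote"][start:start + 32]
        # The pipeline returns one {"label": ..., "score": ...} dict per input.
        batch_predictions = classifier(batch_quotes)
        predictions.extend(label2id[pred["label"]] for pred in batch_predictions)

    print("accuracy : ", accuracy_score(true_labels, predictions))

Slicing the quotes in chunks of 32 keeps memory bounded on CPU while still letting the pipeline handle several inputs per call; the prints added by this commit simply expose the dataset size, the raw predictions, the accuracy, and the final results dict for debugging.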