meg-huggingface committed · Commit 3dbfbdf · 1 Parent(s): 936b02e
src/backend/run_eval_suite_harness.py CHANGED
@@ -13,6 +13,17 @@ from src.logging import setup_logger
 logging.getLogger("openai").setLevel(logging.WARNING)
 logger = setup_logger(__name__)
 
+class NumpyArrayEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.ndarray):
+            return obj.tolist()
+        elif isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        else:
+            return super().default(obj)
+
 def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int, batch_size: int, device: str, local_dir: str, results_repo: str, no_cache: bool =True, limit: int =None):
     """Runs one evaluation for the current evaluation request file, then pushes the results to the hub.
 
@@ -75,7 +86,7 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
     print(key2)
    print(value2)
    print(type(value))
-    dumped = json.dumps(results, indent=2)
+    dumped = json.dumps(results, cls=NumpyArrayEncoder, indent=2)
    logger.info(dumped)

    output_path = os.path.join(local_dir, *eval_request.model.split("/"), f"results_{datetime.now()}.json")
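
Context for the change, as a minimal standalone sketch rather than the harness's actual flow: the stock json.JSONEncoder rejects numpy arrays and numpy integer scalars, so dumping the evaluation results fails as soon as they contain such values; routing them through the new NumpyArrayEncoder converts them to plain Python types first. The results dict below is a hypothetical example, not real harness output.

import json
import numpy as np

# Same encoder as added in the commit above.
class NumpyArrayEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        else:
            return super().default(obj)

# Hypothetical results payload mixing plain values with numpy types.
results = {"acc": 0.8125, "correct": np.int64(13), "per_sample": np.array([1, 0, 1, 1])}

# json.dumps(results, indent=2) would raise a TypeError here, because
# np.int64 and np.ndarray are not JSON serializable by default;
# with the custom encoder they are converted to int and list first.
dumped = json.dumps(results, cls=NumpyArrayEncoder, indent=2)
print(dumped)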