pcuenq (HF staff) committed
Commit 0b076b7 · 1 Parent(s): b6472af

Only validate on macOS.

Files changed (1):
  app.py  +17 -7
app.py CHANGED
@@ -6,6 +6,8 @@ from pathlib import Path
 
 from huggingface_hub import hf_hub_download, HfApi
 from coremltools import ComputeUnit
+from coremltools.models.utils import _is_macos, _macos_version
+
 from transformers.onnx.utils import get_preprocessor
 
 from exporters.coreml import export
@@ -155,10 +157,11 @@ def convert_model(preprocessor, model, model_coreml_config,
 
    mlmodel.save(filename)
 
-    progress(progress_end * 0.8, desc=f"Validating {model_label}")
-    if tolerance is None:
-        tolerance = coreml_config.atol_for_validation
-    validate_model_outputs(coreml_config, preprocessor, model, mlmodel, tolerance)
+    if _is_macos() and _macos_version() >= (12, 0):
+        progress(progress_end * 0.8, desc=f"Validating {model_label}")
+        if tolerance is None:
+            tolerance = coreml_config.atol_for_validation
+        validate_model_outputs(coreml_config, preprocessor, model, mlmodel, tolerance)
    progress(progress_end, desc=f"Done converting {model_label}")
 
 
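The new gate relies on two private helpers from `coremltools.models.utils` (imported in the first hunk). As a rough standalone sketch of the same pattern, with a hypothetical `run_validation` callable standing in for the Space's call to `validate_model_outputs`:

```python
# Hedged sketch of the platform gate added above; run_validation is a
# hypothetical stand-in for the validate_model_outputs call in the Space.
from coremltools.models.utils import _is_macos, _macos_version

def maybe_validate(run_validation):
    # Core ML predictions (which output validation needs) only run on macOS,
    # and this commit gates the check on macOS 12 or newer.
    if _is_macos() and _macos_version() >= (12, 0):
        run_validation()
    else:
        print("Skipping validation: not running on macOS >= 12")
```

On the Linux hardware that Spaces normally run on, `_is_macos()` returns False, so the conversion simply proceeds without the output check.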
 
@@ -176,8 +179,7 @@ def push_to_hub(destination, directory, task, precision, token=None):
    )
 
    subprocess.run(["rm", "-rf", directory])
-    return f"""Successfully converted! We opened a PR to add the Core ML weights to the model repo.
-    Please, view and merge the PR [here]({get_pr_url(HfApi(token=token), destination, commit_message)})."""
+    return get_pr_url(HfApi(token=token), destination, commit_message)
 
 
 def convert(model_id, task,
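With this change, `push_to_hub` returns only the PR URL and the success message is built by the caller. The `get_pr_url` helper itself is defined elsewhere in `app.py` and is not part of this diff; purely as an illustration of how such a helper could work (an assumption, not the Space's actual implementation), the PR can be looked up through `HfApi.get_repo_discussions`:

```python
# Illustrative guess only: the real get_pr_url in app.py is not shown in this
# commit. This sketch finds the newest open pull request whose title matches
# the commit message used when the PR was created.
from huggingface_hub import HfApi

def get_pr_url(api: HfApi, repo_id: str, title: str):
    for discussion in api.get_repo_discussions(repo_id=repo_id):
        if discussion.is_pull_request and discussion.status == "open" and discussion.title == title:
            return f"https://huggingface.co/{repo_id}/discussions/{discussion.num}"
    return None
```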
@@ -249,8 +251,16 @@ def convert(model_id, task,
        )
 
        progress(0.7, "Uploading model to Hub")
-        result = push_to_hub(destination_model, base, task, precision, token=token)
+        pr_url = push_to_hub(destination_model, base, task, precision, token=token)
        progress(1, "Done")
+
+        did_validate = _is_macos() and _macos_version() >= (12, 0)
+        result = f"""### Successfully converted!
+We opened a PR to add the Core ML weights to the model repo. Please, view and merge the PR [here]({pr_url}).
+
+{f"**Note**: model could not be automatically validated as this Space is not running on macOS." if not did_validate else ""}
+"""
+
        return result
    except Exception as e:
        return error_str(e, model=model_id, task=task, framework=framework, compute_units=compute_units, precision=precision, tolerance=tolerance)
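The success message embeds the macOS note through a conditional expression nested inside the outer f-string. A standalone illustration of that pattern, with `pr_url` hard-coded as a placeholder value:

```python
# Standalone illustration of the conditional-note pattern used for `result`;
# pr_url is a placeholder, not a real discussion URL.
pr_url = "https://huggingface.co/user/repo/discussions/1"

for did_validate in (True, False):
    result = f"""### Successfully converted!
We opened a PR to add the Core ML weights to the model repo. Please, view and merge the PR [here]({pr_url}).

{f"**Note**: model could not be automatically validated as this Space is not running on macOS." if not did_validate else ""}
"""
    print(result)
```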
 