Commit 680c2df by pcuenq (HF staff)
1 Parent(s): e05f54a

Add simple progress reporting.

Files changed (1):
  1. app.py (+23, -5)
app.py CHANGED
@@ -89,9 +89,14 @@ def on_model_change(model):
     model_type = None
 
 
-def convert_model(preprocessor, model, model_coreml_config, compute_units, precision, tolerance, output, use_past=False, seq2seq=None):
+def convert_model(preprocessor, model, model_coreml_config,
+                  compute_units, precision, tolerance, output,
+                  use_past=False, seq2seq=None,
+                  progress=None, progress_start=0.1, progress_end=0.8):
     coreml_config = model_coreml_config(model.config, use_past=use_past, seq2seq=seq2seq)
 
+    model_label = "model" if seq2seq is None else seq2seq
+    progress(progress_start, desc=f"Converting {model_label}")
     mlmodel = export(
         preprocessor,
         model,
@@ -109,12 +114,14 @@ def convert_model(preprocessor, model, model_coreml_config, compute_units, preci
 
     mlmodel.save(filename)
 
+    progress(progress_end * 0.8, desc=f"Validating {model_label}")
     if tolerance is None:
         tolerance = coreml_config.atol_for_validation
     validate_model_outputs(coreml_config, preprocessor, model, mlmodel, tolerance)
+    progress(progress_end, desc=f"Done converting {model_label}")
 
 
-def convert(model, task, compute_units, precision, tolerance, framework):
+def convert(model, task, compute_units, precision, tolerance, framework, progress=gr.Progress()):
     model = url_to_model_id(model)
     compute_units = compute_units_mapping[compute_units]
     precision = precision_mapping[precision]
@@ -127,12 +134,13 @@ def convert(model, task, compute_units, precision, tolerance, framework):
     output = output/f"{precision}_model.mlpackage"
 
     try:
+        progress(0, desc="Downloading model")
+
         preprocessor = get_preprocessor(model)
         model = FeaturesManager.get_model_from_feature(task, model, framework=framework)
         _, model_coreml_config = FeaturesManager.check_supported_model_or_raise(model, feature=task)
 
         if task in ["seq2seq-lm", "speech-seq2seq"]:
-            # Convert encoder / decoder
             convert_model(
                 preprocessor,
                 model,
@@ -141,8 +149,12 @@ def convert(model, task, compute_units, precision, tolerance, framework):
                 precision,
                 tolerance,
                 output,
-                seq2seq="encoder"
+                seq2seq="encoder",
+                progress=progress,
+                progress_start=0.1,
+                progress_end=0.45,
             )
+            progress(0.6, desc="Converting decoder")
             convert_model(
                 preprocessor,
                 model,
@@ -151,7 +163,10 @@ def convert(model, task, compute_units, precision, tolerance, framework):
                 precision,
                 tolerance,
                 output,
-                seq2seq="decoder"
+                seq2seq="decoder",
+                progress=progress,
+                progress_start=0.45,
+                progress_end=0.8,
             )
         else:
             convert_model(
@@ -162,9 +177,12 @@ def convert(model, task, compute_units, precision, tolerance, framework):
                 precision,
                 tolerance,
                 output,
+                progress=progress,
+                progress_end=0.8,
             )
 
         # TODO: push to hub, whatever
+        progress(1, "Done")
        return "Done"
     except Exception as e:
         return error_str(e)
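
For reference, a minimal sketch of the gr.Progress pattern this commit wires into convert() and convert_model(). The demo below is hypothetical (convert_stub is not part of app.py) and assumes a Gradio version that provides gr.Progress: Gradio injects a tracker into any handler parameter whose default value is gr.Progress(), and calling that tracker with a fraction between 0 and 1 plus a desc string updates the progress bar shown while the event runs.

import time
import gradio as gr

def convert_stub(model_id, progress=gr.Progress()):
    # Hypothetical stand-in for convert(); the sleeps replace the real
    # download, Core ML export, and validation steps.
    progress(0, desc="Downloading model")
    time.sleep(1)
    progress(0.5, desc="Converting model")
    time.sleep(1)
    progress(1, desc="Done")
    return f"Converted {model_id}"

demo = gr.Interface(fn=convert_stub, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()

In the commit itself, convert() passes the same tracker down to convert_model() together with progress_start/progress_end, so the encoder conversion reports into roughly the 0.10 to 0.45 slice of the bar and the decoder into 0.45 to 0.80, validation is reported at progress_end * 0.8, and a final progress(1, "Done") marks completion just before returning.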