Push to hub. Suggest to open a discussion when an error occurs.
app.py CHANGED
@@ -1,5 +1,7 @@
 import gradio as gr
 import json
+import subprocess
+import urllib.parse
 from pathlib import Path
 
 from huggingface_hub import hf_hub_download, HfApi
@@ -38,14 +40,53 @@ tolerance_mapping = {
 }
 tolerance_labels = list(tolerance_mapping.keys())
 
-…
-…
-…
+push_mapping = {
+    "Submit a PR to the original repo": "pr",
+    "Create a new repo": "new",
+}
+push_labels = list(push_mapping.keys())
+
+def error_str(error, title="Error", model=None, task=None, framework=None, compute_units=None, precision=None, tolerance=None, destination=None):
+    if not error: return ""
+
+    issue_title = urllib.parse.quote(f"Error converting {model}")
+    issue_description = urllib.parse.quote(f"""Conversion Settings:
+
+Model: {model}
+Task: {task}
+Framework: {framework}
+Compute Units: {compute_units}
+Precision: {precision}
+Tolerance: {tolerance}
+Push to: {destination}
+
+Error: {error}
+""")
+    issue_url = f"https://huggingface.co/spaces/pcuenq/transformers-to-coreml/discussions/new?title={issue_title}&description={issue_description}"
+    return f"""
+    #### {title}
+    {error}
+
+    It could be that the model is not yet compatible with the Core ML exporter. Please, open a discussion on the [Hugging Face Hub]({issue_url}) to report this issue.
+    """
 
 def url_to_model_id(model_id_str):
     if not model_id_str.startswith("https://huggingface.co/"): return model_id_str
     return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1]
 
+def get_pr_url(api, repo_id, title):
+    try:
+        discussions = api.get_repo_discussions(repo_id=repo_id)
+    except Exception:
+        return None
+    for discussion in discussions:
+        if (
+            discussion.status == "open"
+            and discussion.is_pull_request
+            and discussion.title == title
+        ):
+            return f"https://huggingface.co/{repo_id}/discussions/{discussion.num}"
+
 def supported_frameworks(model_id):
     """
     Return a list of supported frameworks (`PyTorch` or `TensorFlow`) for a given model_id.
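The new `error_str` turns a failed conversion into a one-click report: it URL-encodes the settings into the `title` and `description` query parameters of the Space's new-discussion page. A minimal sketch of that encoding step, with made-up model and error values:

```python
import urllib.parse

# Hypothetical failure details, only to illustrate the encoding.
model = "distilbert-base-uncased"
error = "op not supported by the Core ML exporter"

title = urllib.parse.quote(f"Error converting {model}")
description = urllib.parse.quote(f"Model: {model}\n\nError: {error}")

# Prefilled new-discussion URL, in the shape error_str builds above.
url = (
    "https://huggingface.co/spaces/pcuenq/transformers-to-coreml/discussions/new"
    f"?title={title}&description={description}"
)
print(url)  # spaces become %20, newlines %0A
```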
@@ -82,7 +123,7 @@ def on_model_change(model):
             gr.update(visible=bool(model_type)),    # Settings column
             gr.update(choices=tasks, value=tasks[0] if tasks else None),    # Tasks
             gr.update(visible=len(frameworks)>1, choices=frameworks, value=selected_framework),    # Frameworks
-            gr.update(value=error_str(error)),
+            gr.update(value=error_str(error, model=model)),    # Error
         )
     except Exception as e:
         error = e
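`on_model_change` already drove several components from one event by returning one `gr.update(...)` per output; the change just routes the model name into the error text. A stripped-down sketch of the pattern (Gradio 3.x, the version this Space targets; component names are illustrative):

```python
import gradio as gr

def on_change(text):
    error = "" if text else "Please enter a model id"
    return (
        gr.update(visible=bool(text)),  # settings column
        gr.update(value=error),         # error message
    )

with gr.Blocks() as demo:
    box = gr.Textbox(label="Model")
    with gr.Column(visible=False) as settings:
        gr.Markdown("settings go here")
    error_md = gr.Markdown()
    box.change(on_change, inputs=box, outputs=[settings, error_md])
```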
@@ -121,23 +162,49 @@ def convert_model(preprocessor, model, model_coreml_config,
     progress(progress_end, desc=f"Done converting {model_label}")
 
 
-def convert(model, task, compute_units, precision, tolerance, framework, progress=gr.Progress()):
-…
+def push_to_hub(destination, directory, task, precision, token=None):
+    api = HfApi(token=token)
+    api.create_repo(destination, token=token, exist_ok=True)
+    commit_message="Add Core ML conversion"
+    api.upload_folder(
+        folder_path=directory,
+        repo_id=destination,
+        token=token,
+        create_pr=True,
+        commit_message=commit_message,
+        commit_description=f"Core ML conversion, task={task}, precision={precision}",
+    )
+
+    subprocess.run(["rm", "-rf", directory])
+    return f"""Successfully converted! We opened a PR to add the Core ML weights to the model repo.
+    Please, view and merge the PR [here]({get_pr_url(HfApi(token=token), destination, commit_message)})."""
+
+
+def convert(model_id, task,
+            compute_units, precision, tolerance, framework,
+            push_destination, destination_model, token,
+            progress=gr.Progress()):
+    model_id = url_to_model_id(model_id)
     compute_units = compute_units_mapping[compute_units]
     precision = precision_mapping[precision]
     tolerance = tolerance_mapping[tolerance]
     framework = framework_mapping[framework]
+    push_destination = push_mapping[push_destination]
+    if push_destination == "pr":
+        destination_model = model_id
+        token = None
 
     # TODO: support legacy format
-…
+    base = Path("exported")/model_id
+    output = base/"coreml"/task
     output.mkdir(parents=True, exist_ok=True)
     output = output/f"{precision}_model.mlpackage"
 
     try:
         progress(0, desc="Downloading model")
 
-        preprocessor = get_preprocessor(…)
-        model = FeaturesManager.get_model_from_feature(task, …)
+        preprocessor = get_preprocessor(model_id)
+        model = FeaturesManager.get_model_from_feature(task, model_id, framework=framework)
         _, model_coreml_config = FeaturesManager.check_supported_model_or_raise(model, feature=task)
 
         if task in ["seq2seq-lm", "speech-seq2seq"]:
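`push_to_hub` uploads the exported folder as a pull request (`create_pr=True`) and then shells out to `rm -rf` to clean up. `shutil.rmtree` would be the in-process, portable equivalent; a sketch of that variant (same effect, just a swapped-in stdlib call):

```python
import shutil
from pathlib import Path

def cleanup(directory: Path):
    # Equivalent to subprocess.run(["rm", "-rf", directory]),
    # without spawning a child process; ignore_errors mirrors -f.
    shutil.rmtree(directory, ignore_errors=True)
```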
@@ -152,9 +219,9 @@ def convert(model, task, compute_units, precision, tolerance, framework, progres
                 seq2seq="encoder",
                 progress=progress,
                 progress_start=0.1,
-                progress_end=0.…
+                progress_end=0.4,
             )
-            progress(0.…
+            progress(0.4, desc="Converting decoder")
             convert_model(
                 preprocessor,
                 model,
@@ -165,8 +232,8 @@ def convert(model, task, compute_units, precision, tolerance, framework, progres
                 output,
                 seq2seq="decoder",
                 progress=progress,
-                progress_start=0.…
-                progress_end=0.…
+                progress_start=0.4,
+                progress_end=0.7,
             )
         else:
             convert_model(
@@ -178,14 +245,15 @@ def convert(model, task, compute_units, precision, tolerance, framework, progres
                 tolerance,
                 output,
                 progress=progress,
-                progress_end=0.…
+                progress_end=0.7,
             )
 
-…
+        progress(0.7, "Uploading model to Hub")
+        result = push_to_hub(destination_model, base, task, precision, token=token)
         progress(1, "Done")
-        return …
+        return result
     except Exception as e:
-        return error_str(e)
+        return error_str(e, model=model_id, task=task, framework=framework, compute_units=compute_units, precision=precision, tolerance=tolerance)
 
 DESCRIPTION = """
 ## Convert a transformers model to Core ML
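With the upload step added, the `gr.Progress` budget spans the whole pipeline: roughly 0–0.1 download, 0.1–0.4 encoder, 0.4–0.7 decoder (or up to 0.7 for single-model tasks), and 0.7–1.0 upload. A toy sketch of how a Gradio callback consumes those fractions (`fake_convert` is a stand-in, not part of the Space):

```python
import gradio as gr

def fake_convert(progress=gr.Progress()):
    # Stage boundaries mirroring convert() above.
    stages = [(0.0, "Downloading model"),
              (0.1, "Converting encoder"),
              (0.4, "Converting decoder"),
              (0.7, "Uploading model to Hub")]
    for fraction, desc in stages:
        progress(fraction, desc=desc)
        # ... the real work for each stage would run here ...
    progress(1, desc="Done")
    return "ok"
```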
@@ -235,6 +303,17 @@ with gr.Blocks() as demo:
                     choices=tolerance_labels,
                     value=tolerance_labels[0],
                 )
+
+                radio_push = gr.Radio(
+                    label="Destination Model",
+                    choices=push_labels,
+                    value=push_labels[0],
+                )
+                with gr.Row(visible=False) as row_destination:
+                    # TODO: public/private
+                    text_destination = gr.Textbox(label="Destination model name", value="")
+                    text_token = gr.Textbox(label="Token (write permissions)", value="")
+
                 btn_convert = gr.Button("Convert")
                 gr.Markdown("Conversion will take a few minutes.")
 
@@ -251,18 +330,25 @@ with gr.Blocks() as demo:
 
     btn_convert.click(
         fn=convert,
-        inputs=[input_model, radio_tasks, radio_compute, radio_precision, radio_tolerance, radio_framework],
+        inputs=[input_model, radio_tasks, radio_compute, radio_precision, radio_tolerance, radio_framework, radio_push, text_destination, text_token],
         outputs=error_output,
         scroll_to_output=True
     )
 
-…
-…
-…
-…
-…
-…
-…
+    radio_push.change(
+        lambda x: gr.update(visible=x == "Create a new repo"),
+        inputs=radio_push,
+        outputs=row_destination,
+        queue=False,
+        scroll_to_output=True
+    )
+
+    gr.HTML("""
+        <div style="border-top: 0.5px solid #303030;">
+            <br>
+            <p style="color:gray;font-size:smaller;font-style:italic">Adapted from https://huggingface.co/spaces/diffusers/sd-to-diffusers/tree/main</p><br>
+        </div>
+    """)
 
 demo.queue(concurrency_count=1, max_size=10)
 demo.launch(debug=True, share=False)
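The `radio_push.change` handler is the usual Blocks trick for conditional UI: a `gr.Row` created hidden, toggled by the radio value. The same pattern in isolation (labels match the Space's; the rest is a minimal harness):

```python
import gradio as gr

choices = ["Submit a PR to the original repo", "Create a new repo"]

with gr.Blocks() as demo:
    radio = gr.Radio(label="Destination Model", choices=choices, value=choices[0])
    with gr.Row(visible=False) as row:
        gr.Textbox(label="Destination model name")
        gr.Textbox(label="Token (write permissions)")
    # Only a brand-new repo needs its own name and a write token.
    radio.change(lambda x: gr.update(visible=x == "Create a new repo"),
                 inputs=radio, outputs=row, queue=False)

demo.launch()
```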