Update app.py #14
opened by reach-vb (HF staff)
app.py
CHANGED
@@ -9,6 +9,8 @@ from huggingface_hub import create_repo, HfApi
 from huggingface_hub import snapshot_download
 from huggingface_hub import whoami
 from huggingface_hub import ModelCard
+from huggingface_hub import login
+from huggingface_hub import scan_cache_dir
 
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
 
@@ -20,15 +22,31 @@ from mlx_lm import convert
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
+def clear_cache():
+    scan = scan_cache_dir()
+    to_delete = []
+    for repo in scan.repos:
+        if repo.repo_type == "model":
+            to_delete.extend(rev.commit_hash for rev in repo.revisions)
+
+    scan.delete_revisions(*to_delete).execute()
+
+    print("Cache has been cleared")
+
 def process_model(model_id, q_method,oauth_token: gr.OAuthToken | None):
+
     if oauth_token.token is None:
-        raise ValueError("You must be logged in to use
+        raise ValueError("You must be logged in to use MLX-my-repo")
+
     model_name = model_id.split('/')[-1]
     username = whoami(oauth_token.token)["name"]
+
+    login(token=oauth_token.token, add_to_git_credential=True)
 
     try:
         upload_repo = username + "/" + model_name + "-mlx"
         convert(model_id, quantize=True, upload_repo=upload_repo)
+        clear_cache()
         return (
             f'Find your repo <a href=\'{new_repo_url}\' target="_blank" style="text-decoration:underline">here</a>',
             "llama.png",
@@ -37,6 +55,7 @@ def process_model(model_id, q_method,oauth_token: gr.OAuthToken | None):
         return (f"Error: {e}", "error.png")
     finally:
         shutil.rmtree("mlx_model", ignore_errors=True)
+        clear_cache()
         print("Folder cleaned up successfully!")
 
 css="""/* Custom CSS to allow scrolling */
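For context on the new clear_cache() helper: in huggingface_hub, scan_cache_dir() returns an HFCacheInfo snapshot of the local cache, and delete_revisions() only builds a DeleteCacheStrategy; nothing is actually removed until its execute() method runs. A minimal standalone sketch of the flow the helper relies on (the dry-run print is illustrative):

    from huggingface_hub import scan_cache_dir

    # Snapshot of the local cache (~/.cache/huggingface/hub by default).
    cache_info = scan_cache_dir()

    # Gather the commit hash of every cached model revision.
    hashes = [
        rev.commit_hash
        for repo in cache_info.repos
        if repo.repo_type == "model"
        for rev in repo.revisions
    ]

    # delete_revisions() plans the deletion; execute() performs it.
    strategy = cache_info.delete_revisions(*hashes)
    print(f"Expected to free {strategy.expected_freed_size_str}")
    strategy.execute()

Clearing the cache after each conversion keeps the Space's ephemeral disk from filling up as successive models are downloaded and converted.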
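The oauth_token: gr.OAuthToken | None parameter, together with the new login(token=..., add_to_git_credential=True) call, follows Gradio's Hugging Face OAuth pattern: on a Space with OAuth enabled, any event handler that declares a gr.OAuthToken-typed parameter receives the signed-in user's token automatically (the parameter is not listed among the event's inputs), and login() then registers that token as the default credential so convert(..., upload_repo=...) can push the converted weights to the user's namespace. A hedged sketch of the wiring, with placeholder component names rather than this Space's actual layout:

    import gradio as gr

    def process(model_id: str, oauth_token: gr.OAuthToken | None):
        # Gradio injects oauth_token for signed-in users; it stays None otherwise.
        if oauth_token is None:
            raise gr.Error("Please sign in with Hugging Face first.")
        return f"Authenticated request for {model_id}"

    with gr.Blocks() as demo:
        gr.LoginButton()  # renders the "Sign in with Hugging Face" button
        model_id = gr.Textbox(label="Hub model ID")
        result = gr.Textbox(label="Result")
        run = gr.Button("Run")
        # oauth_token is deliberately absent from inputs; Gradio supplies it.
        run.click(process, inputs=[model_id], outputs=[result])

    demo.launch()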