hysts HF Staff committed on
Commit 5561069 · 1 Parent(s): bfa16fe

Disable access to unofficial models

Files changed (2)
  1. app.py +0 -25
  2. constants.py +0 -10
app.py CHANGED
@@ -5,9 +5,7 @@ from __future__ import annotations
 import os

 import gradio as gr
-from huggingface_hub import HfApi

-from constants import MODEL_LIBRARY_ORG_NAME
 from inference import InferencePipeline


@@ -15,15 +13,6 @@ class InferenceUtil:
     def __init__(self, hf_token: str | None):
         self.hf_token = hf_token

-    def load_hub_model_list(self) -> dict:
-        api = HfApi(token=self.hf_token)
-        choices = [
-            info.modelId
-            for info in api.list_models(author=MODEL_LIBRARY_ORG_NAME)
-        ]
-        return gr.update(choices=choices,
-                         value=choices[0] if choices else None)
-
     def load_model_info(self, model_id: str) -> tuple[str, str]:
         try:
             card = InferencePipeline.get_model_card(model_id, self.hf_token)
@@ -33,12 +22,6 @@ class InferenceUtil:
         training_prompt = getattr(card.data, 'training_prompt', '')
         return base_model, training_prompt

-    def reload_model_list_and_update_model_info(self) -> tuple[dict, str, str]:
-        model_list_update = self.load_hub_model_list()
-        model_list = model_list_update['choices']
-        model_info = self.load_model_info(model_list[0] if model_list else '')
-        return model_list_update, *model_info
-

 TITLE = '# [Tune-A-Video](https://tuneavideo.github.io/)'
 HF_TOKEN = os.getenv('HF_TOKEN')
@@ -51,7 +34,6 @@ with gr.Blocks(css='style.css') as demo:
     with gr.Row():
         with gr.Column():
             with gr.Box():
-                reload_button = gr.Button('Reload Model List')
                 model_id = gr.Dropdown(
                     label='Model ID',
                     choices=[
@@ -214,13 +196,6 @@ with gr.Blocks(css='style.css') as demo:
                     fn=pipe.run,
                     cache_examples=True)

-    reload_button.click(fn=app.reload_model_list_and_update_model_info,
-                        inputs=None,
-                        outputs=[
-                            model_id,
-                            base_model_used_for_training,
-                            prompt_used_for_training,
-                        ])
     model_id.change(fn=app.load_model_info,
                     inputs=model_id,
                     outputs=[
 
constants.py DELETED
@@ -1,10 +0,0 @@
-import enum
-
-
-class UploadTarget(enum.Enum):
-    PERSONAL_PROFILE = 'Personal Profile'
-    MODEL_LIBRARY = 'Tune-A-Video Library'
-
-
-MODEL_LIBRARY_ORG_NAME = 'Tune-A-Video-library'
-SAMPLE_MODEL_REPO = 'Tune-A-Video-library/a-man-is-surfing'
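For context, the deleted code path populated the Model ID dropdown dynamically with every model under the library org on the Hub; after this commit only the hard-coded official choices remain. Below is a minimal standalone sketch of that removed logic, reconstructed from the deleted lines above. It assumes huggingface_hub's HfApi and Gradio 3.x-style gr.update, as used in this app; the script entry point is only for illustration.

import os

import gradio as gr
from huggingface_hub import HfApi

# Org name copied from the deleted constants.py
MODEL_LIBRARY_ORG_NAME = 'Tune-A-Video-library'


def load_hub_model_list(hf_token: str | None) -> dict:
    # List all models owned by the library org and return a dropdown update,
    # selecting the first model if any exist. Newer huggingface_hub versions
    # expose the repo id as `info.id` rather than `info.modelId`.
    api = HfApi(token=hf_token)
    choices = [
        info.modelId for info in api.list_models(author=MODEL_LIBRARY_ORG_NAME)
    ]
    return gr.update(choices=choices,
                     value=choices[0] if choices else None)


if __name__ == '__main__':
    # Prints the update dict that would be fed to the gr.Dropdown component.
    print(load_hub_model_list(os.getenv('HF_TOKEN')))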