Spaces (status: Runtime error)
Commit: "Update app.py" — Browse files
File: app.py (CHANGED)
@@ -5,7 +5,11 @@ from huggingface_hub import InferenceClient
  5     from threading import Timer
  6
  7     HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
  8   - def get_available_free():
  9     models_dict = InferenceClient(token=HUGGINGFACE_TOKEN).list_deployed_models("text-generation-inference")
 10     models = models_dict['text-generation'] + models_dict['text2text-generation']
 11     models_vision = models_dict['image-text-to-text']
@@ -59,8 +63,8 @@ def get_available_free():
 59     pd.DataFrame(models_conclusion).to_csv("data.csv", index=False)
 60     return models_conclusion
 61
 62   - def update_data():
 63   -     data = get_available_free()
 64     df = pd.DataFrame(data)
 65
 66     status_mapping = {"✓": 0, "⌀": 1, "---": 2}
@@ -75,8 +79,8 @@ def update_data():
 75
 76     return df
 77
 78   - def display_table(search_query=""):
 79   -     df = update_data()
 80     if search_query:
 81         filtered_df = df[df["Model"].str.contains(search_query, case=False)]
 82     else:
@@ -116,8 +120,8 @@ def color_status(api_value, cell_value):
116         return 'background-color: red'
117     return ''
118
119   - def search_models(query):
120   -     return display_table(query)
121
122     description = """
123     This is a space that retrieves the status of all supported HF LLM Serverless Inference APIs.
@@ -138,7 +142,7 @@ with gr.Blocks() as demo:
138     gr.Markdown("## HF Serverless LLM Inference API Status")
139     gr.Markdown(description)
140     search_box = gr.Textbox(label="Search for a model", placeholder="Type model name here...")
141   - table = gr.Dataframe(value=display_table(), headers="keys")
142
143     search_box.change(fn=search_models, inputs=search_box, outputs=table)
144
(after) @@ -5,7 +5,11 @@ from huggingface_hub import InferenceClient
  5     from threading import Timer
  6
  7     HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
  8   + def get_available_free(use_cache = False):
  9   +     if use_cache:
 10   +         if os.path.exists("data.csv"):
 11   +             print("Loading data from file...")
 12   +             return pd.read_csv("data.csv").to_dict(orient='list')
 13     models_dict = InferenceClient(token=HUGGINGFACE_TOKEN).list_deployed_models("text-generation-inference")
 14     models = models_dict['text-generation'] + models_dict['text2text-generation']
 15     models_vision = models_dict['image-text-to-text']
 63     pd.DataFrame(models_conclusion).to_csv("data.csv", index=False)
 64     return models_conclusion
 65
 66   + def update_data(use_cache = False):
 67   +     data = get_available_free(use_cache)
 68     df = pd.DataFrame(data)
 69
 70     status_mapping = {"✓": 0, "⌀": 1, "---": 2}
 79
 80     return df
 81
 82   + def display_table(search_query="", use_cache = False):
 83   +     df = update_data(use_cache)
 84     if search_query:
 85         filtered_df = df[df["Model"].str.contains(search_query, case=False)]
 86     else:
120         return 'background-color: red'
121     return ''
122
123   + def search_models(query, use_cache = False):
124   +     return display_table(query, use_cache)
125
126     description = """
127     This is a space that retrieves the status of all supported HF LLM Serverless Inference APIs.
142     gr.Markdown("## HF Serverless LLM Inference API Status")
143     gr.Markdown(description)
144     search_box = gr.Textbox(label="Search for a model", placeholder="Type model name here...")
145   + table = gr.Dataframe(value=display_table(True), headers="keys")
146
147     search_box.change(fn=search_models, inputs=search_box, outputs=table)
148

NOTE(review): new line 145 is a bug — `display_table(True)` binds `True` to the first
parameter `search_query`, not to `use_cache`. The cached load is therefore never used, and
the truthy `search_query` sends `True` into `df["Model"].str.contains(True, case=False)`,
which raises at startup — the likely cause of this Space's "Runtime error" status. It should
read `display_table(use_cache=True)` (or `display_table("", True)`).