Update app.py
app.py
CHANGED
@@ -3,8 +3,9 @@ import gradio as gr
 import pandas as pd
 from huggingface_hub import InferenceClient
 from threading import Timer
+from tqdm import tqdm
 
-HUGGINGFACE_TOKEN =
+HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
 def get_available_free(use_cache = False):
     if use_cache:
         if os.path.exists(str(os.getcwd())+"/data.csv"):
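This hunk replaces the hardcoded token with an environment lookup. A minimal sketch of the same pattern, assuming the token has been exported as HUGGINGFACE_TOKEN (on a Space, via a repository secret); the model ID and the error message are illustrative, not from app.py:

import os
from huggingface_hub import InferenceClient

# Read the token from the environment instead of hardcoding it.
# Locally: export HUGGINGFACE_TOKEN=hf_...  (on a Space, set a secret)
token = os.environ.get("HUGGINGFACE_TOKEN")
if token is None:
    raise RuntimeError("HUGGINGFACE_TOKEN is not set")

# "gpt2" is just a placeholder model ID for illustration.
client = InferenceClient("gpt2", token=token)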
@@ -25,15 +26,13 @@ def get_available_free(use_cache = False):
     }
 
     all_models = list(set(models + models_vision + models_others))
-
-    for m in all_models:
+    for m in tqdm(all_models):
         text_available = False
         chat_available = False
         vision_available = False
         if m in models_vision:
            vision_available = True
         pro_sub = False
-        print(m)
         try:
             InferenceClient(m, timeout=10, token=HUGGINGFACE_TOKEN).text_generation("Hi.", max_new_tokens=1)
             text_available = True
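Here the bare print(m) is dropped in favor of wrapping the loop in tqdm, which gives a progress bar over the model list. A minimal sketch of the probe-and-track pattern this loop uses, assuming the same 1-token text_generation check; probe and the model IDs are hypothetical names for illustration:

from tqdm import tqdm
from huggingface_hub import InferenceClient

def probe(model_id, token):
    # True if the serverless API answers a 1-token generation request;
    # any error (missing, still loading, gated) counts as unavailable here.
    try:
        InferenceClient(model_id, timeout=10, token=token).text_generation("Hi.", max_new_tokens=1)
        return True
    except Exception:
        return False

model_ids = ["gpt2", "distilgpt2"]  # placeholder IDs
availability = {m: probe(m, token=None) for m in tqdm(model_ids)}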
@@ -84,14 +83,23 @@ def update_data(use_cache = False):
 
     return df
 
-def display_table(search_query="", use_cache=False):
+def display_table(search_query="", filters=[], use_cache=False):
     df = update_data(use_cache)
     search_query = str(search_query)
+
     if search_query:
         filtered_df = df[df["Model"].str.contains(search_query, case=False)]
     else:
         filtered_df = df
-
+
+    if filters:
+        if "Free" in filters:
+            filtered_df = filtered_df[filtered_df["API"] == "Free"]
+        if "Text Completion" in filters:
+            filtered_df = filtered_df[filtered_df["Text Completion"] == "✓"]
+        if "Chat Completion" in filters:
+            filtered_df = filtered_df[filtered_df["Chat Completion"] == "✓"]
+
     styled_df = filtered_df.style.apply(apply_row_styles, axis=1, subset=["Model", "API", "Text Completion", "Chat Completion", "Vision"])
     return styled_df
 
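The new filters argument narrows the frame with one boolean mask per checked box, so the masks compose: each filter only ever removes rows. A small self-contained sketch of the same chaining, using made-up rows and the same "✓" marker as the table (the "Pro Subscription" and "---" values are assumed placeholders):

import pandas as pd

df = pd.DataFrame({
    "Model": ["m-one", "m-two", "m-three"],
    "API": ["Free", "Pro Subscription", "Free"],
    "Chat Completion": ["✓", "✓", "---"],
})

filters = ["Free", "Chat Completion"]
out = df
if "Free" in filters:
    out = out[out["API"] == "Free"]           # keep free-tier rows
if "Chat Completion" in filters:
    out = out[out["Chat Completion"] == "✓"]  # keep rows with chat support
print(out["Model"].tolist())  # ['m-one']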
@@ -149,9 +157,14 @@ with gr.Blocks() as demo:
     gr.Markdown("## HF Serverless LLM Inference API Status")
     gr.Markdown(description)
     search_box = gr.Textbox(label="Search for a model", placeholder="Type model name here...")
+    filter_box = gr.CheckboxGroup(choices=["Free", "Text Completion", "Chat Completion"], label="Filters")
     table = gr.Dataframe(value=display_table(use_cache=True), headers="keys")
-
-
+
+    def update_filters(query, filters):
+        return search_models(query, filters, use_cache=True)
+
+    search_box.change(fn=update_filters, inputs=[search_box, filter_box], outputs=table)
+    filter_box.change(fn=update_filters, inputs=[search_box, filter_box], outputs=table)
 
     def update_every_two_hours(first_run):
         search_models(search_box.value, first_run)
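Both the textbox and the new checkbox group are wired to the same callback, so changing either control re-renders the table. A minimal runnable sketch of that wiring, with a stand-in render function in place of app.py's search_models:

import gradio as gr
import pandas as pd

def render(query, filters):
    # Hypothetical stand-in for search_models(); returns a filtered dummy table.
    df = pd.DataFrame({"Model": ["alpha-model", "beta-model"], "API": ["Free", "Free"]})
    return df[df["Model"].str.contains(str(query), case=False)]

with gr.Blocks() as demo:
    query_box = gr.Textbox(label="Search for a model")
    filter_box = gr.CheckboxGroup(choices=["Free", "Text Completion", "Chat Completion"], label="Filters")
    table = gr.Dataframe(value=render("", []))
    # Any change to either input re-runs render() and refreshes the table.
    query_box.change(fn=render, inputs=[query_box, filter_box], outputs=table)
    filter_box.change(fn=render, inputs=[query_box, filter_box], outputs=table)

demo.launch()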
|