Update app.py
app.py CHANGED
@@ -141,11 +141,12 @@ def search_models(query, filters = [], use_cache = True):
     return display_table(query, filters, use_cache)
 
 description = """
-This is a space that retrieves the status of
+This is a space that retrieves the status of supported HF LLM Serverless Inference APIs.
 *Updates every 2 hours!*
 
 If you are a student or you just want to quickly see what models are available to experiment for free, you are most likely highly interested on the free API huggingface provides... but like me, you struggle to find what models are available or not!
-This is why I made this space that every 2 hours checks and updates the status of the list of LLMs that are in theory supported by retrieving the list in `InferenceClient().list_deployed_models()`.
+This is why I made this space that every 2 hours checks and updates the status of the list of LLMs that are cached and, in theory, supported by retrieving the list in `InferenceClient().list_deployed_models()`.
+*It may not have all of the available ones... for now... it's WIP*
 
 So all you need is to plug:
 ```py
@@ -160,8 +161,9 @@ with gr.Blocks() as demo:
     gr.Markdown("## HF Serverless LLM Inference API Status")
     gr.Markdown(description)
     search_box = gr.Textbox(label="Search for a model", placeholder="Type model name here...")
-    filter_box = gr.CheckboxGroup(choices=["Free", "Pro Subscription", "Not Responding", "Text Completion", "Chat Completion", "Vision"], label="Filters")
     table = gr.Dataframe(value=display_table(use_cache=True), headers="keys")
+    gr.Markdown("### Cached Endpoints")
+    filter_box = gr.CheckboxGroup(choices=["Free", "Pro Subscription", "Not Responding", "Text Completion", "Chat Completion", "Vision"], label="Filters")
 
     def update_filters(query, filters):
         return search_models(query, filters, use_cache=True)
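For context on the call the updated description refers to, here is a minimal sketch of fetching the deployed-model list with `huggingface_hub`. This is not this Space's code: the task key used in the loop is an illustrative assumption, and the Space's actual caching and status-check logic is not shown in this diff.

```py
# Minimal sketch, assuming huggingface_hub is installed and the serverless
# Inference API is reachable without extra configuration.
from huggingface_hub import InferenceClient

client = InferenceClient()

# list_deployed_models() returns a dict mapping task names
# (e.g. "text-generation") to lists of model IDs currently deployed
# on the serverless Inference API.
deployed = client.list_deployed_models()

# Illustrative only: print the text-generation models that are currently up.
for model_id in deployed.get("text-generation", []):
    print(model_id)
```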