# Source: Hugging Face Spaces snapshot — the Space's status banner reported
# "Runtime error" (see fetch_models: the original referenced an undefined TOKEN).
import os
from threading import Timer

import gradio as gr
import requests
def fetch_models():
    """Fetch the model catalogue from the inference API.

    Returns:
        dict: Parsed JSON body of the API response (presumably mapping
        pipeline tags to model lists — see loop_query_data's usage).

    Raises:
        requests.HTTPError: If the API responds with a 4xx/5xx status.
        requests.RequestException: On connection failure or timeout.
    """
    # Example URL, replace with the actual API endpoint
    url = "https://api.example.com/v1/models"
    # Read the token from the environment: the original interpolated an
    # undefined module-level name TOKEN, raising NameError on every call.
    token = os.environ.get("HF_TOKEN", "")
    headers = {
        "Authorization": f"Bearer {token}"
    }
    # Explicit timeout so a hung endpoint cannot block the refresh thread forever.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    return response.json()
def loop_query_data():
    """Refresh the module-level ``all_models`` cache from the API.

    On any fetch failure the cache is reset to an empty list so the UI
    degrades gracefully instead of crashing at import time.
    """
    global all_models
    try:
        models_dict = fetch_models()
        # Combine both text-generation pipelines; missing keys yield [].
        models = models_dict.get('text-generation', []) + models_dict.get('text2text-generation', [])
        all_models = models
    except (requests.RequestException, KeyError) as e:
        # The original caught only KeyError, which dict.get never raises,
        # so network/HTTP errors from fetch_models escaped uncaught and
        # crashed the app during module import.
        print(f"Failed to refresh model list: {e}")
        all_models = []
def search_models(query, filters, use_cache=False):
    """Return cached model names containing *query* (case-insensitive).

    ``filters`` and ``use_cache`` are accepted for interface compatibility
    but are currently unused placeholders.
    """
    needle = query.lower()
    matches = []
    for name in all_models:
        if needle in name.lower():
            matches.append(name)
    # Additional filtering based on `filters` would be applied here.
    return matches
def display_table(use_cache=False):
    """Build table rows for the UI: one dict per cached model name.

    ``use_cache`` is accepted for interface compatibility but unused.
    """
    rows = []
    for name in all_models:
        rows.append({"Model Name": name})
    return rows
# Module state: cached model list plus a first-run flag for the refresh timer.
all_models = []
first_run = True
# Populate the cache once at import time so the UI has data on first render.
loop_query_data()
with gr.Blocks() as demo:
    gr.Markdown("## HF Serverless LLM Inference API Status")
    gr.Markdown("Description of the API")
    search_box = gr.Textbox(label="Search for a model", placeholder="Type model name here...")
    filter_box = gr.CheckboxGroup(choices=["Free", "Pro Subscription", "Not Responding", "Text Completion", "Chat Completion", "Vision"], label="Filters")
    # gr.Dataframe takes list-of-rows data plus an explicit header list.
    # The original passed a list of dicts with headers="keys", which the
    # Dataframe component does not accept as row data.
    table = gr.Dataframe(
        value=[[row["Model Name"]] for row in display_table(use_cache=True)],
        headers=["Model Name"],
    )

    def update_filters(query, filters):
        """Event handler: recompute the table rows from the search inputs."""
        # Wrap each matching name in a single-cell row; the original
        # returned a flat list of strings, which a Dataframe output
        # cannot render as rows.
        return [[name] for name in search_models(query, filters, use_cache=True)]

    search_box.change(fn=update_filters, inputs=[search_box, filter_box], outputs=table)
    filter_box.change(fn=update_filters, inputs=[search_box, filter_box], outputs=table)

    def update_every_two_hours(first_run):
        """Background refresh: re-fetch models, then reschedule itself."""
        loop_query_data()
        # NOTE(review): search_box.value is the component's initial value,
        # not live UI state — this call only warms the search path.
        search_models(search_box.value, [], use_cache=first_run)
        refresh = Timer(7200, update_every_two_hours, args=(False,))  # 7200 seconds = 2 hours
        # daemon=True so pending timers cannot keep the process alive
        # after the Gradio server shuts down.
        refresh.daemon = True
        refresh.start()

    kickoff = Timer(0, update_every_two_hours, args=(first_run,))
    kickoff.daemon = True
    kickoff.start()
demo.launch()