Update app.py
app.py CHANGED
@@ -84,13 +84,17 @@ def generate_html_page(username, models, datasets):
 
     return html_file_path
 
-# Cache the HTML …
+# Cache the HTML file path using Streamlit's caching decorator
 @st.cache_data(show_spinner=False)
-def …
+def get_cached_html_file(username):
+    return generate_html_page(username, *get_user_content(username))
+
+# Fetch user content from the API (without caching)
+def get_user_content(username):
     user_data = asyncio.run(fetch_user_content(username))
     if "error" in user_data:
         return None, user_data["error"]
-    return …
+    return user_data["models"], user_data["datasets"]
 
 # Streamlit app setup - the nerve center of our operation! 🎛️
 st.title("Hugging Face User Content Display - Let's Automate Some Fun! 🚀")
@@ -116,31 +120,31 @@ if st.button("Show User Content"):
     st.markdown(f"**{username}** [🔗 Profile](https://huggingface.co/{username})")
 
     # Generate HTML page and provide download link - because who wouldn't want a custom webpage? 📄
-    …
-    …
-    …
-        st.warning(f"{username}: {user_data} - Looks like the AI needs a coffee break ☕")
+    models, datasets = get_user_content(username)
+    if models is None:
+        st.warning(f"{username}: {datasets} - Looks like the AI needs a coffee break ☕")
     else:
+        html_file_path = get_cached_html_file(username)
         st.markdown(f"[📄 Download {username}'s HTML Page]({html_file_path})")
 
         # Add to statistics for Plotly graphs
         stats["username"].append(username)
-        stats["models_count"].append(len(…
-        stats["datasets_count"].append(len(…
+        stats["models_count"].append(len(models))
+        stats["datasets_count"].append(len(datasets))
 
         # Models section with expander - 🧠 because AI models are brainy! 🧠
-        with st.expander(f"🧠 Models ({len(…
-        if …
-        for model in …
+        with st.expander(f"🧠 Models ({len(models)})", expanded=False):
+            if models:
+                for model in models:
                     model_name = model.modelId.split("/")[-1]
                     st.markdown(f"- [{model_name}](https://huggingface.co/{model.modelId})")
                 else:
                     st.markdown("No models found. Did you check under the rug? 🕵️‍♀️")
 
         # Datasets section with expander - 📊 because data is the foundation of AI! 📊
-        with st.expander(f"📊 Datasets ({len(…
-        if …
-        for dataset in …
+        with st.expander(f"📊 Datasets ({len(datasets)})", expanded=False):
+            if datasets:
+                for dataset in datasets:
                     dataset_name = dataset.id.split("/")[-1]
                     st.markdown(f"- [{dataset_name}](https://huggingface.co/datasets/{dataset.id})")
                 else:
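For context: both hunks lean on fetch_user_content(username), a coroutine defined elsewhere in app.py that evidently returns either an {"error": ...} dict or a dict with "models" and "datasets" keys. A minimal sketch of what such a coroutine might look like, assuming the huggingface_hub client library (the model.modelId and dataset.id attributes used above match its ModelInfo/DatasetInfo objects); the body below is an assumption, not the app's actual code:

import asyncio
from huggingface_hub import HfApi
from huggingface_hub.utils import HfHubHTTPError

async def fetch_user_content(username):
    # Hypothetical sketch: list_models()/list_datasets() are blocking HTTP
    # calls, so run them in worker threads to keep the event loop responsive.
    api = HfApi()
    try:
        models = await asyncio.to_thread(lambda: list(api.list_models(author=username)))
        datasets = await asyncio.to_thread(lambda: list(api.list_datasets(author=username)))
        return {"models": models, "datasets": datasets}
    except HfHubHTTPError as err:
        # Same {"error": ...} shape that get_user_content checks for
        return {"error": str(err)}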
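On the caching split: st.cache_data memoizes a function by its input arguments, so get_cached_html_file rebuilds the HTML page only once per username per cache lifetime, while get_user_content stays uncached and hits the API on every button press. A toy illustration of the decorator's behavior (standalone example, not from app.py):

import streamlit as st

@st.cache_data(show_spinner=False)
def build_page(username: str) -> str:
    # Body runs once per distinct argument; repeat calls return the cached value.
    return f"<html><body>{username}</body></html>"

build_page("alice")  # computes and caches
build_page("alice")  # cache hit - function body is skipped
build_page("bob")    # new argument - computes again

One side effect of the split as written: on a cache miss, get_user_content runs twice per click, once in the button handler and once inside get_cached_html_file, doubling the API round trips.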
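A caveat on the download link: the markdown link targets html_file_path, a server-side path that Streamlit does not serve to the browser, so the link will generally not download anything. st.download_button is the idiomatic alternative; a sketch, reusing username and html_file_path from the hunk above:

import streamlit as st

# Assumes html_file_path points at a readable local file, as in the diff.
with open(html_file_path, "rb") as f:
    st.download_button(
        label=f"📄 Download {username}'s HTML Page",
        data=f.read(),
        file_name=f"{username}.html",
        mime="text/html",
    )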