awacke1 commited on
Commit
cf0448c
·
verified ·
1 Parent(s): 69ebbe6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -15
app.py CHANGED
@@ -84,13 +84,17 @@ def generate_html_page(username, models, datasets):
84
 
85
  return html_file_path
86
 
87
- # Cache the HTML generation process using Streamlit's caching decorator
88
  @st.cache_data(show_spinner=False)
89
- def get_cached_html_page(username):
 
 
 
 
90
  user_data = asyncio.run(fetch_user_content(username))
91
  if "error" in user_data:
92
  return None, user_data["error"]
93
- return generate_html_page(username, user_data["models"], user_data["datasets"]), user_data
94
 
95
  # Streamlit app setup - the nerve center of our operation! 🎛️
96
  st.title("Hugging Face User Content Display - Let's Automate Some Fun! 🎉")
@@ -116,31 +120,31 @@ if st.button("Show User Content"):
116
  st.markdown(f"**{username}** [🔗 Profile](https://huggingface.co/{username})")
117
 
118
  # Generate HTML page and provide download link - because who wouldn't want a custom webpage? 🌐
119
- html_file_path, user_data = get_cached_html_page(username)
120
-
121
- if not html_file_path:
122
- st.warning(f"{username}: {user_data} - Looks like the AI needs a coffee break ☕")
123
  else:
 
124
  st.markdown(f"[📄 Download {username}'s HTML Page]({html_file_path})")
125
 
126
  # Add to statistics for Plotly graphs
127
  stats["username"].append(username)
128
- stats["models_count"].append(len(user_data["models"]))
129
- stats["datasets_count"].append(len(user_data["datasets"]))
130
 
131
  # Models section with expander - 🧠 because AI models are brainy! 🧠
132
- with st.expander(f"🧠 Models ({len(user_data['models'])})", expanded=False):
133
- if user_data['models']:
134
- for model in user_data['models']:
135
  model_name = model.modelId.split("/")[-1]
136
  st.markdown(f"- [{model_name}](https://huggingface.co/{model.modelId})")
137
  else:
138
  st.markdown("No models found. Did you check under the rug? 🕵️‍♂️")
139
 
140
  # Datasets section with expander - 📚 because data is the foundation of AI! 📚
141
- with st.expander(f"📚 Datasets ({len(user_data['datasets'])})", expanded=False):
142
- if user_data['datasets']:
143
- for dataset in user_data['datasets']:
144
  dataset_name = dataset.id.split("/")[-1]
145
  st.markdown(f"- [{dataset_name}](https://huggingface.co/datasets/{dataset.id})")
146
  else:
 
84
 
85
  return html_file_path
86
 
87
+ # Cache the HTML file path using Streamlit's caching decorator
88
  @st.cache_data(show_spinner=False)
89
+ def get_cached_html_file(username):
90
+ return generate_html_page(username, *get_user_content(username))
91
+
92
+ # Fetch user content from the API (without caching)
93
+ def get_user_content(username):
94
  user_data = asyncio.run(fetch_user_content(username))
95
  if "error" in user_data:
96
  return None, user_data["error"]
97
+ return user_data["models"], user_data["datasets"]
98
 
99
  # Streamlit app setup - the nerve center of our operation! 🎛️
100
  st.title("Hugging Face User Content Display - Let's Automate Some Fun! 🎉")
 
120
  st.markdown(f"**{username}** [🔗 Profile](https://huggingface.co/{username})")
121
 
122
  # Generate HTML page and provide download link - because who wouldn't want a custom webpage? 🌐
123
+ models, datasets = get_user_content(username)
124
+ if models is None:
125
+ st.warning(f"{username}: {datasets} - Looks like the AI needs a coffee break ☕")
 
126
  else:
127
+ html_file_path = get_cached_html_file(username)
128
  st.markdown(f"[📄 Download {username}'s HTML Page]({html_file_path})")
129
 
130
  # Add to statistics for Plotly graphs
131
  stats["username"].append(username)
132
+ stats["models_count"].append(len(models))
133
+ stats["datasets_count"].append(len(datasets))
134
 
135
  # Models section with expander - 🧠 because AI models are brainy! 🧠
136
+ with st.expander(f"🧠 Models ({len(models)})", expanded=False):
137
+ if models:
138
+ for model in models:
139
  model_name = model.modelId.split("/")[-1]
140
  st.markdown(f"- [{model_name}](https://huggingface.co/{model.modelId})")
141
  else:
142
  st.markdown("No models found. Did you check under the rug? 🕵️‍♂️")
143
 
144
  # Datasets section with expander - 📚 because data is the foundation of AI! 📚
145
+ with st.expander(f"📚 Datasets ({len(datasets)})", expanded=False):
146
+ if datasets:
147
+ for dataset in datasets:
148
  dataset_name = dataset.id.split("/")[-1]
149
  st.markdown(f"- [{dataset_name}](https://huggingface.co/datasets/{dataset.id})")
150
  else: