Commit 042869d (verified) by AiAF · Parent(s): 1360bb6

Rename multimodalart-app.py to app.py

Files changed (1):
  multimodalart-app.py → app.py (renamed, +35 −42)
@@ -76,37 +76,28 @@ def get_json_data(url):
         gr.Warning(f"Error fetching data from Civitai API for {url_split[4]}: {e}")
         return None
 
-def check_nsfw(json_data: Dict[str, Any], profile: Optional[gr.OAuthProfile]) -> bool:
-    if not json_data:
-        return False # Should not happen if get_json_data succeeded
-
-    # Overall model boolean flag - highest priority
+def check_nsfw(json_data: Dict[str, Any]) -> bool:
+    """
+    Returns True if the model or any of its versions/images are NSFW.
+    We no longer block NSFW, only tag it.
+    """
+    nsfw_flag = False
+
     if json_data.get("nsfw", False):
-        print("Model flagged as NSFW by 'nsfw: true'.")
-        gr.Info("Reason: Model explicitly flagged as NSFW on Civitai.")
-        return False # Unsafe
-
-    # Overall model numeric nsfwLevel - second priority. Max allowed is 5 (nsfwLevel < 6).
-    # nsfwLevel definitions: None (1), Mild (2), Mature (4), Adult (5), X (8), R (16), XXX (32)
-    model_nsfw_level = json_data.get("nsfwLevel", 0)
-    if model_nsfw_level > 5: # Anything above "Adult"
-        print(f"Model's overall nsfwLevel ({model_nsfw_level}) is > 5. Blocking.")
-        gr.Info(f"Reason: Model's overall NSFW Level ({model_nsfw_level}) is above the allowed threshold (5).")
-        return False # Unsafe
-
-    # If uploader is trusted and the above checks passed, they bypass further version/image checks.
-    if profile and profile.username in TRUSTED_UPLOADERS:
-        print(f"User {profile.username} is trusted. Model 'nsfw' is false and overall nsfwLevel ({model_nsfw_level}) is <= 5. Allowing.")
-        return True
-
-    # For non-trusted users, check nsfwLevel of model versions and individual images/videos
+        nsfw_flag = True
+
+    if json_data.get("nsfwLevel", 0) > 0:
+        nsfw_flag = True
+
     for model_version in json_data.get("modelVersions", []):
-        version_nsfw_level = model_version.get("nsfwLevel", 0)
-        if version_nsfw_level > 5:
-            print(f"Model version nsfwLevel ({version_nsfw_level}) is > 5 for non-trusted user. Blocking.")
-            gr.Info(f"Reason: A model version's NSFW Level ({version_nsfw_level}) is above 5.")
-            return False
-    return True # Safe for non-trusted user if all checks pass
+        if model_version.get("nsfwLevel", 0) > 0:
+            nsfw_flag = True
+        for image_obj in model_version.get("images", []):
+            if image_obj.get("nsfwLevel", 0) > 0:
+                nsfw_flag = True
+
+    return nsfw_flag
+
 
 
 def get_prompts_from_image(image_id_str: str):
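
Reviewer note: a minimal sketch of the new tag-only behavior. The payload below is hypothetical (a trimmed stand-in for a CivitAI API response) and is not part of this commit:

# Hypothetical, trimmed CivitAI model payload (illustration only):
sample = {
    "nsfw": False,
    "nsfwLevel": 0,
    "modelVersions": [
        # A single Mature-rated (level 4) preview image is enough to set the flag
        {"nsfwLevel": 0, "images": [{"nsfwLevel": 4}]},
    ],
}
assert check_nsfw(sample) is True  # tagged, but no longer blocked

Note that on the scale quoted in the removed comment (None is 1, Mild is 2, ...), any model that reports a level at all satisfies nsfwLevel > 0, so the flag is very broad.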
@@ -288,22 +279,24 @@ def download_file_with_auth(url, filename, folder="."):
 def process_url(url, profile, do_download=True, folder=".", hunyuan_type: Optional[str] = None):
     json_data = get_json_data(url)
     if json_data:
-        if check_nsfw(json_data, profile):
-            info = extract_info(json_data, hunyuan_type=hunyuan_type)
-            if info:
-                downloaded_files_summary = {}
-                if do_download:
-                    gr.Info(f"Downloading files for {info['name']}...")
-                    downloaded_files_summary = download_files(info, folder)
-                    gr.Info(f"Finished downloading files for {info['name']}.")
-                return info, downloaded_files_summary
-            else:
-                raise gr.Error("LoRA extraction failed. The base model might not be supported, or it's not a LoRA model, or no suitable files found in the version.")
+        # Always extract info, even if NSFW
+        info = extract_info(json_data, hunyuan_type=hunyuan_type)
+        if info:
+            # Detect NSFW but do not block
+            nsfw_flag = check_nsfw(json_data)
+            info["nsfw_flag"] = nsfw_flag
+
+            downloaded_files_summary = {}
+            if do_download:
+                gr.Info(f"Downloading files for {info['name']}...")
+                downloaded_files_summary = download_files(info, folder)
+                gr.Info(f"Finished downloading files for {info['name']}.")
+
+            return info, downloaded_files_summary
         else:
-            # check_nsfw now prints detailed reasons via gr.Info/print
-            raise gr.Error("This model has content tagged as unsafe by CivitAI or exceeds NSFW level limits.")
+            raise gr.Error("LoRA extraction failed. The base model might not be supported, or it's not a LoRA model, or no suitable files found in the version.")
     else:
-        raise gr.Error("Failed to fetch model data from CivitAI API. Please check the URL and Civitai's status.")
+        raise gr.Error("Failed to fetch model data from CivitAI API. Please check the URL and CivitAI's status.")
 
 
 def create_readme(info: Dict[str, Any], downloaded_files: Dict[str, Any], user_repo_id: str, link_civit: bool = False, is_author: bool = True, folder: str = "."):
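
Since process_url no longer raises on NSFW content, callers can branch on the returned info["nsfw_flag"] instead. A minimal sketch, assuming the same call signature; the tag name below is an illustration, not something this commit introduces:

info, files_summary = process_url(url, profile, do_download=False)
tags = ["lora"]
if info.get("nsfw_flag"):
    # Illustrative only: surface the flag downstream, e.g. as a repo tag
    tags.append("not-for-all-audiences")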
 