John6666 committed (verified)
Commit e93cd05 · Parent: da2e837

Upload 8 files

Files changed (4):
  1. README.md +2 -2
  2. app.py +4 -15
  3. externalmod.py +54 -26
  4. requirements.txt +1 -1
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: 868 AI Art Models Toy World (Gradio 4.x)
+title: 868 AI Art Models Toy World (Gradio 5.x)
 emoji: 🪅🌐
 colorFrom: green
 colorTo: gray
 sdk: gradio
-sdk_version: 4.39.0
+sdk_version: 5.0.1
 app_file: app.py
 pinned: false
 duplicated_from:
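The change above bumps the Space's SDK from Gradio 4.39.0 to 5.0.1, matching the retitled "(Gradio 5.x)". A quick sanity check once the Space rebuilds; this is a sketch that uses only the standard `gradio` version attribute, nothing from this repo:

    import gradio as gr

    # The rebuilt Space should report the SDK version pinned in README.md.
    print(gr.__version__)  # expected: 5.0.1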
app.py CHANGED
@@ -85,8 +85,7 @@ def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None,
     return result
 
 css="""
-.gradio-container {background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
-color: #ffaa66 !important; font-family: 'IBM Plex Sans', sans-serif !important;
+.gradio-container {!important; font-family: 'IBM Plex Sans', sans-serif !important;
 text-align: center; max-width: 1200px; margin: 0 auto; !important;}
 h1 {font-size: 6em; color: #ffc99f; margin-top: 30px; margin-bottom: 30px;
 text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;}
@@ -98,19 +97,9 @@ h4 {display: inline-block; color: #ffffff !important; }
 .text-gray-500 {color: #ffc99f !important;}
 .gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important; border-top-color: #000000 !important;
 border-right-color: #ffffff !important; border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
-.gr-input {color: #ffc99f; !important; background-color: #254150 !important;}
-:root {--neutral-100: #000000 !important;}
-.gr-button {color: #ffffff !important; text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
-background-image: linear-gradient(#76635a, #d2a489) !important; border-radius: 24px !important;
-border: solid 1px !important; border-top-color: #ffc99f !important; border-right-color: #000000 !important;
-border-bottom-color: #000000 !important; border-left-color: #ffc99f !important; padding: 6px 30px;}
-.gr-button:active {color: #ffc99f !important; font-size: 98% !important; text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
-background-image: linear-gradient(#d2a489, #76635a) !important; border-top-color: #000000 !important;
-border-right-color: #ffffff !important; border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
-.gr-button:hover {filter: brightness(130%);}
 """
 
-with gr.Blocks(fill_width=True, css=css) as myface:
+with gr.Blocks(theme='John6666/YntecLight', fill_width=True, css=css) as myface:
     gr.HTML(f"""
     <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
     <div>
@@ -186,7 +175,7 @@ with gr.Blocks(fill_width=True, css=css) as myface:
         with gr.Column(scale=100):
             with gr.Group():
                 magic1 = gr.Textbox(label="Your Prompt", lines=4, elem_classes="gr-box") #Positive
-                with gr.Accordion("Advanced", open=False, visible=True, elem_classes="gr-box"):
+                with gr.Accordion("Advanced", open=False, visible=True):
                     neg_input = gr.Textbox(label='Negative prompt', lines=1, elem_classes="gr-box")
                     with gr.Row():
                         width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes="gr-box")
@@ -217,5 +206,5 @@ with gr.Blocks(fill_width=True, css=css) as myface:
     use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
     see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
 
-myface.queue()
+#myface.queue()
 myface.launch(inline=True, show_api=False)
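Two things happen in app.py: most of the hand-rolled widget CSS is replaced by the Hub-hosted theme 'John6666/YntecLight', and `myface.queue()` is commented out (recent Gradio versions enable queuing by default, so the explicit call is presumably redundant). A minimal sketch of the same theme-from-Hub pattern, assuming only the public `gr.Blocks` API and the theme repo named in this diff:

    import gradio as gr

    # Passing a Hub repo id as `theme` makes Gradio fetch and apply that theme,
    # replacing the need for most custom widget CSS.
    with gr.Blocks(theme='John6666/YntecLight', fill_width=True) as demo:
        prompt = gr.Textbox(label="Your Prompt", lines=4)

    demo.launch()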
externalmod.py CHANGED
@@ -9,7 +9,7 @@ import re
 import tempfile
 import warnings
 from pathlib import Path
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Callable, Literal
 
 import httpx
 import huggingface_hub
@@ -33,6 +33,7 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 server_timeout = 600
 
 
@@ -40,7 +41,7 @@ server_timeout = 600
 def load(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -51,7 +52,7 @@ def load(
     Parameters:
         name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
         src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
-        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
+        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
         alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
     Returns:
         a Gradio Blocks object for the given model
@@ -68,7 +69,7 @@ def load(
 def load_blocks_from_repo(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -92,7 +93,7 @@ def load_blocks_from_repo(
     if src.lower() not in factory_methods:
         raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")
 
-    if hf_token is not None:
+    if hf_token is not None and hf_token is not False:
         if Context.hf_token is not None and Context.hf_token != hf_token:
             warnings.warn(
                 """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
@@ -103,12 +104,16 @@ def load_blocks_from_repo(
     return blocks
 
 
-def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
+def from_model(
+    model_name: str, hf_token: str | Literal[False] | None, alias: str | None, **kwargs
+):
     model_url = f"https://huggingface.co/{model_name}"
     api_url = f"https://api-inference.huggingface.co/models/{model_name}"
     print(f"Fetching model from: {model_url}")
 
-    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
+    headers = (
+        {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}
+    )
     response = httpx.request("GET", api_url, headers=headers)
     if response.status_code != 200:
         raise ModelNotFoundError(
@@ -371,7 +376,11 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
     def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data, **kwargs) # type: ignore
+        try:
+            data = fn(*data, **kwargs) # type: ignore
+        except huggingface_hub.utils.HfHubHTTPError as e:
+            if "429" in str(e):
+                raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data) # type: ignore
         return data
@@ -383,7 +392,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
         "inputs": inputs,
         "outputs": outputs,
         "title": model_name,
-        # "examples": examples,
+        #"examples": examples,
     }
 
     kwargs = dict(interface_info, **kwargs)
@@ -394,19 +403,12 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
 def from_spaces(
     space_name: str, hf_token: str | None, alias: str | None, **kwargs
 ) -> Blocks:
-    client = Client(
-        space_name,
-        hf_token=hf_token,
-        download_files=False,
-        _skip_components=False,
-    )
-
     space_url = f"https://huggingface.co/spaces/{space_name}"
 
     print(f"Fetching Space from: {space_url}")
 
     headers = {}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     iframe_url = (
@@ -443,8 +445,7 @@ def from_spaces(
             "Blocks or Interface locally. You may find this Guide helpful: "
             "https://gradio.app/using_blocks_like_functions/"
         )
-    if client.app_version < version.Version("4.0.0b14"):
-        return from_spaces_blocks(space=space_name, hf_token=hf_token)
+    return from_spaces_blocks(space=space_name, hf_token=hf_token)
 
 
 def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
@@ -489,7 +490,7 @@ def from_spaces_interface(
     config = external_utils.streamline_spaces_interface(config)
     api_url = f"{iframe_url}/api/predict/"
     headers = {"Content-Type": "application/json"}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     # The function should call the API with preprocessed data
@@ -529,7 +530,7 @@ def gr_Interface_load(
     src: str | None = None,
     hf_token: str | None = None,
     alias: str | None = None,
-    **kwargs,
+    **kwargs, # ignore
 ) -> Blocks:
     try:
         return load_blocks_from_repo(name, src, hf_token, alias)
@@ -543,8 +544,8 @@ def list_uniq(l):
 
 
 def get_status(model_name: str):
-    from huggingface_hub import InferenceClient
-    client = InferenceClient(timeout=10)
+    from huggingface_hub import AsyncInferenceClient
+    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
     return client.get_model_status(model_name)
 
 
@@ -563,22 +564,49 @@ def is_loadable(model_name: str, force_gpu: bool = False):
 
 def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default_tags = ["diffusers"]
     if not sort: sort = "last_modified"
     limit = limit * 20 if check_status and force_gpu else limit * 5
     models = []
     try:
-        model_infos = api.list_models(author=author, task="text-to-image",
+        model_infos = api.list_models(author=author, #task="text-to-image",
                                       tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit)
     except Exception as e:
         print(f"Error: Failed to list models.")
         print(e)
         return models
     for model in model_infos:
-        if not model.private and not model.gated:
+        if not model.private and not model.gated or HF_TOKEN is not None:
            loadable = is_loadable(model.id, force_gpu) if check_status else True
            if not_tag and not_tag in model.tags or not loadable: continue
            models.append(model.id)
            if len(models) == limit: break
     return models
+
+
+def save_image(image, savefile, modelname, prompt, nprompt, height=0, width=0, steps=0, cfg=0, seed=-1):
+    from PIL import Image, PngImagePlugin
+    import json
+    try:
+        metadata = {"prompt": prompt, "negative_prompt": nprompt, "Model": {"Model": modelname.split("/")[-1]}}
+        if steps > 0: metadata["num_inference_steps"] = steps
+        if cfg > 0: metadata["guidance_scale"] = cfg
+        if seed != -1: metadata["seed"] = seed
+        if width > 0 and height > 0: metadata["resolution"] = f"{width} x {height}"
+        metadata_str = json.dumps(metadata)
+        info = PngImagePlugin.PngInfo()
+        info.add_text("metadata", metadata_str)
+        image.save(savefile, "PNG", pnginfo=info)
+        return str(Path(savefile).resolve())
+    except Exception as e:
+        print(f"Failed to save image file: {e}")
+        raise Exception(f"Failed to save image file:") from e
+
+
+def randomize_seed():
+    from random import seed, randint
+    MAX_SEED = 2**32-1
+    seed()
+    rseed = randint(0, MAX_SEED)
+    return rseed
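The new `save_image` helper embeds the generation parameters as one JSON string in a PNG text chunk keyed "metadata". A short sketch of reading that back (the filename is hypothetical; `Image.open` and the `.text` mapping for PNG files are standard Pillow APIs):

    import json
    from PIL import Image

    img = Image.open("output.png")           # hypothetical file written by save_image()
    meta = json.loads(img.text["metadata"])  # PNG text chunks are exposed via .text
    print(meta["prompt"], meta.get("seed"))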
requirements.txt CHANGED
@@ -1,3 +1,3 @@
-transformers
+transformers==4.44.0
 numpy<2
 torch==2.2.0