John6666 committed on
Commit 23aa4a5
1 Parent(s): 1198ac5

Upload 18 files

Files changed (5)
  1. app.py +11 -10
  2. dc.py +11 -75
  3. llmdolphin.py +86 -45
  4. modutils.py +23 -3
  5. tagger/tagger.py +9 -16
app.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
4
 
5
  # DiffuseCraft
6
  from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers, save_image_history,
7
- get_vaes, enable_model_recom_prompt, enable_diffusers_model_detail, extract_exif_data, esrgan_upscale, UPSCALER_KEYS,
8
  preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
@@ -48,6 +48,7 @@ css = """
48
 
49
  with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60, 3600)) as demo:
50
  gr.Markdown("# Votepurchase Multiple Model", elem_classes="title")
 
51
  with gr.Tab("Image Generator"):
52
  with gr.Column(elem_id="col-container"):
53
  with gr.Row():
@@ -203,7 +204,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
203
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
204
  guidance_scale, num_inference_steps, model_name,
205
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
206
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
207
  outputs=[result],
208
  queue=True,
209
  show_progress="full",
@@ -216,21 +217,21 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
216
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
217
  guidance_scale, num_inference_steps, model_name,
218
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
219
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
220
  outputs=[result],
221
  queue=False,
222
  show_api=True,
223
  api_name="infer_translate",
224
  ).success(
225
  fn=dolphin_respond_auto,
226
- inputs=[prompt, chatbot],
227
  outputs=[chatbot, result, prompt],
228
  queue=True,
229
  show_progress="full",
230
  show_api=False,
231
  ).success(
232
  fn=dolphin_parse_simple,
233
- inputs=[prompt, chatbot],
234
  outputs=[prompt],
235
  queue=False,
236
  show_api=False,
@@ -239,7 +240,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
239
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
240
  guidance_scale, num_inference_steps, model_name,
241
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
242
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
243
  outputs=[result],
244
  queue=True,
245
  show_progress="full",
@@ -289,7 +290,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
289
  )
290
  lora_search_civitai_gallery.select(update_civitai_selection, None, [lora_search_civitai_result], queue=False, show_api=False)
291
 
292
- recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
293
  gr.on(
294
  triggers=[quality_selector.change, style_selector.change],
295
  fn=process_style_prompt,
@@ -300,12 +301,12 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
300
  show_api=False,
301
  )
302
 
303
- model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
304
  model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
305
 
306
- chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full", show_api=False)\
307
  .success(lambda: None, None, chatbot, queue=False, show_api=False)
308
- chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False, show_api=False)\
309
  .success(lambda: None, None, chatbot, queue=False, show_api=False)
310
 
311
  # Tagger
 
4
 
5
  # DiffuseCraft
6
  from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers, save_image_history,
7
+ get_vaes, enable_diffusers_model_detail, extract_exif_data, esrgan_upscale, UPSCALER_KEYS,
8
  preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
 
48
 
49
  with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60, 3600)) as demo:
50
  gr.Markdown("# Votepurchase Multiple Model", elem_classes="title")
51
+ state = gr.State(value={})
52
  with gr.Tab("Image Generator"):
53
  with gr.Column(elem_id="col-container"):
54
  with gr.Row():
 
204
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
205
  guidance_scale, num_inference_steps, model_name,
206
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
207
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
208
  outputs=[result],
209
  queue=True,
210
  show_progress="full",
 
217
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
218
  guidance_scale, num_inference_steps, model_name,
219
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
220
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
221
  outputs=[result],
222
  queue=False,
223
  show_api=True,
224
  api_name="infer_translate",
225
  ).success(
226
  fn=dolphin_respond_auto,
227
+ inputs=[prompt, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, state],
228
  outputs=[chatbot, result, prompt],
229
  queue=True,
230
  show_progress="full",
231
  show_api=False,
232
  ).success(
233
  fn=dolphin_parse_simple,
234
+ inputs=[prompt, chatbot, state],
235
  outputs=[prompt],
236
  queue=False,
237
  show_api=False,
 
240
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
241
  guidance_scale, num_inference_steps, model_name,
242
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
243
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
244
  outputs=[result],
245
  queue=True,
246
  show_progress="full",
 
290
  )
291
  lora_search_civitai_gallery.select(update_civitai_selection, None, [lora_search_civitai_result], queue=False, show_api=False)
292
 
293
+ #recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
294
  gr.on(
295
  triggers=[quality_selector.change, style_selector.change],
296
  fn=process_style_prompt,
 
301
  show_api=False,
302
  )
303
 
304
+ model_detail.change(enable_diffusers_model_detail, [model_detail, model_name, state], [model_detail, model_name, state], queue=False, show_api=False)
305
  model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
306
 
307
+ chat_model.change(select_dolphin_model, [chat_model, state], [chat_model, chat_format, chat_model_info, state], queue=True, show_progress="full", show_api=False)\
308
  .success(lambda: None, None, chatbot, queue=False, show_api=False)
309
+ chat_format.change(select_dolphin_format, [chat_format, state], [chat_format, state], queue=False, show_api=False)\
310
  .success(lambda: None, None, chatbot, queue=False, show_api=False)
311
 
312
  # Tagger
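
The app.py diff above swaps module-level toggles for a per-session dict held in `gr.State`, threads that dict through each handler's inputs and outputs, and feeds `recom_prompt` directly into `infer`. A minimal sketch of the wiring pattern, using illustrative names (`toggle_detail`, `show_detail`) rather than the repo's actual components:

```python
import gradio as gr

def toggle_detail(is_enable: bool, state: dict):
    # store the per-session flag instead of mutating a module-level global
    state["show_detail"] = is_enable
    return gr.update(value=is_enable), state

with gr.Blocks() as demo:
    state = gr.State(value={})   # one dict per browser session
    detail = gr.Checkbox(label="Show model detail", value=False)
    # the state component appears in both inputs and outputs, like the handlers above
    detail.change(toggle_detail, [detail, state], [detail, state], queue=False)

demo.launch()
```

Because the dict lives in `gr.State`, concurrent sessions no longer overwrite each other's settings, which appears to be the motivation for dropping the globals.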
dc.py CHANGED
@@ -351,7 +351,6 @@ class GuiSD:
351
 
352
  ## BEGIN MOD
353
  loras_list = [s if s else "None" for s in loras_list]
354
- prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
355
  global lora_model_list
356
  lora_model_list = get_lora_model_list()
357
  ## END MOD
@@ -696,7 +695,7 @@ import random
696
  import json
697
  import shutil
698
  from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
699
- get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
700
  get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
701
  normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
702
 
@@ -706,7 +705,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
706
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
707
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
708
  sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
709
- progress=gr.Progress(track_tqdm=True)):
710
  MAX_SEED = np.iinfo(np.int32).max
711
 
712
  image_previews = True
@@ -727,7 +726,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
727
  prompt = translate_to_en(prompt)
728
  negative_prompt = translate_to_en(prompt)
729
 
730
- prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
731
  progress(0.5, desc="Preparing...")
732
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
733
  set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
@@ -761,70 +760,14 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
761
 
762
  return output_image
763
 
764
- #@spaces.GPU
765
- def __infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
766
- model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
767
- lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
768
- sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
769
- MAX_SEED = np.iinfo(np.int32).max
770
-
771
- load_lora_cpu = False
772
- verbose_info = False
773
- gpu_duration = 59
774
-
775
- images: list[tuple[PIL.Image.Image, str | None]] = []
776
- info_state = info_images = ""
777
- progress(0, desc="Preparing...")
778
-
779
- if randomize_seed:
780
- seed = random.randint(0, MAX_SEED)
781
-
782
- generator = torch.Generator().manual_seed(seed).seed()
783
-
784
- if translate:
785
- prompt = translate_to_en(prompt)
786
- negative_prompt = translate_to_en(prompt)
787
-
788
- prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
789
- progress(0.5, desc="Preparing...")
790
- lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
791
- set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
792
- lora1 = get_valid_lora_path(lora1)
793
- lora2 = get_valid_lora_path(lora2)
794
- lora3 = get_valid_lora_path(lora3)
795
- lora4 = get_valid_lora_path(lora4)
796
- lora5 = get_valid_lora_path(lora5)
797
- progress(1, desc="Preparation completed. Starting inference...")
798
-
799
- progress(0, desc="Loading model...")
800
- sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
801
- progress(1, desc="Model loaded.")
802
- progress(0, desc="Starting Inference...")
803
- info_state, images, info_images = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
804
- guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
805
- lora4, lora4_wt, lora5, lora5_wt, sampler,
806
- height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
807
- None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
808
- 1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
809
- False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
810
- False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
811
- False, "", "", 0.35, True, True, False, 4, 4, 32,
812
- True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
813
- load_lora_cpu, verbose_info, gpu_duration
814
- )
815
- progress(1, desc="Inference completed.")
816
- output_image = images[0][0] if images else None
817
-
818
- return output_image
819
-
820
 
821
  #@spaces.GPU
822
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
823
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
824
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
825
  sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
826
- progress=gr.Progress(track_tqdm=True)):
827
- return gr.update(visible=True)
828
 
829
 
830
  infer.zerogpu = True
@@ -843,17 +786,16 @@ def get_vaes():
843
  return vae_model_list
844
 
845
 
846
- show_diffusers_model_list_detail = False
847
  cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
848
- def get_diffusers_model_list():
 
849
  if show_diffusers_model_list_detail:
850
  return cached_diffusers_model_tupled_list
851
  else:
852
  return load_diffusers_format_model
853
 
854
 
855
- def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = ""):
856
- global show_diffusers_model_list_detail
857
  show_diffusers_model_list_detail = is_enable
858
  new_value = model_name
859
  index = 0
@@ -863,7 +805,8 @@ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "")
863
  new_value = cached_diffusers_model_tupled_list[index][1]
864
  else:
865
  new_value = load_diffusers_format_model[index]
866
- return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list())
 
867
 
868
 
869
  def load_model_prompt_dict():
@@ -879,7 +822,6 @@ def load_model_prompt_dict():
879
  model_prompt_dict = load_model_prompt_dict()
880
 
881
 
882
- model_recom_prompt_enabled = True
883
  animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
884
  animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
885
  pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
@@ -888,7 +830,7 @@ other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, c
888
  other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
889
  default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
890
  default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
891
- def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
892
  if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
893
  prompts = to_list(prompt)
894
  neg_prompts = to_list(neg_prompt)
@@ -911,12 +853,6 @@ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name
911
  return prompt, neg_prompt
912
 
913
 
914
- def enable_model_recom_prompt(is_enable: bool = True):
915
- global model_recom_prompt_enabled
916
- model_recom_prompt_enabled = is_enable
917
- return is_enable
918
-
919
-
920
  private_lora_dict = {}
921
  try:
922
  with open('lora_dict.json', encoding='utf-8') as f:
 
351
 
352
  ## BEGIN MOD
353
  loras_list = [s if s else "None" for s in loras_list]
 
354
  global lora_model_list
355
  lora_model_list = get_lora_model_list()
356
  ## END MOD
 
695
  import json
696
  import shutil
697
  from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
698
+ get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
699
  get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
700
  normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
701
 
 
705
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
706
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
707
  sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
708
+ recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
709
  MAX_SEED = np.iinfo(np.int32).max
710
 
711
  image_previews = True
 
726
  prompt = translate_to_en(prompt)
727
  negative_prompt = translate_to_en(prompt)
728
 
729
+ prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name, recom_prompt)
730
  progress(0.5, desc="Preparing...")
731
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
732
  set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
 
760
 
761
  return output_image
762
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
763
 
764
  #@spaces.GPU
765
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
766
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
767
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
768
  sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
769
+ recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
770
+ return gr.update()
771
 
772
 
773
  infer.zerogpu = True
 
786
  return vae_model_list
787
 
788
 
 
789
  cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
790
+ def get_diffusers_model_list(state: dict = {}):
791
+ show_diffusers_model_list_detail = get_state(state, "show_diffusers_model_list_detail")
792
  if show_diffusers_model_list_detail:
793
  return cached_diffusers_model_tupled_list
794
  else:
795
  return load_diffusers_format_model
796
 
797
 
798
+ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "", state: dict = {}):
 
799
  show_diffusers_model_list_detail = is_enable
800
  new_value = model_name
801
  index = 0
 
805
  new_value = cached_diffusers_model_tupled_list[index][1]
806
  else:
807
  new_value = load_diffusers_format_model[index]
808
+ set_state(state, "show_diffusers_model_list_detail", show_diffusers_model_list_detail)
809
+ return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list(state)), state
810
 
811
 
812
  def load_model_prompt_dict():
 
822
  model_prompt_dict = load_model_prompt_dict()
823
 
824
 
 
825
  animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
826
  animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
827
  pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
 
830
  other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
831
  default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
832
  default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
833
+ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", model_recom_prompt_enabled = True):
834
  if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
835
  prompts = to_list(prompt)
836
  neg_prompts = to_list(neg_prompt)
 
853
  return prompt, neg_prompt
854
 
855
 
 
 
 
 
 
 
856
  private_lora_dict = {}
857
  try:
858
  with open('lora_dict.json', encoding='utf-8') as f:
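
In dc.py the commit removes the dead `__infer` helper and the `enable_model_recom_prompt` global, and instead passes the toggle down the call chain: `infer`/`_infer` gain a trailing `recom_prompt` argument and `insert_model_recom_prompt` receives it as `model_recom_prompt_enabled`. A rough, self-contained sketch of that flag-as-argument shape; the tag presets here are placeholders, not the per-model lists defined in the file:

```python
def insert_model_recom_prompt(prompt: str, neg_prompt: str, model_name: str = "None",
                              enabled: bool = True) -> tuple[str, str]:
    # when disabled (or no model is given), prompts pass through untouched
    if not enabled or not model_name:
        return prompt, neg_prompt
    ps = [t.strip() for t in prompt.split(",") if t.strip()]
    nps = [t.strip() for t in neg_prompt.split(",") if t.strip()]
    ps += ["masterpiece", "best quality"]        # placeholder for the model-specific presets
    nps += ["lowres", "worst quality"]
    dedup = lambda xs: list(dict.fromkeys(xs))   # keep order, drop duplicates
    return ", ".join(dedup(ps)), ", ".join(dedup(nps))

print(insert_model_recom_prompt("1girl, masterpiece", "text", "Animagine-XL", enabled=True))
# ('1girl, masterpiece, best quality', 'text, lowres, worst quality')
```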
llmdolphin.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
3
  from pathlib import Path
4
  import re
5
  import torch
 
6
  from huggingface_hub import hf_hub_download, HfApi
7
  from llama_cpp import Llama
8
  from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
@@ -75,6 +76,27 @@ llm_models = {
75
  "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
76
  "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
77
  "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  "Fraken-Maid-TW-Slerp.i1-Q5_K_M.gguf": ["mradermacher/Fraken-Maid-TW-Slerp-i1-GGUF", MessagesFormatterType.MISTRAL],
79
  "KunoichiLake-2x7b.Q4_K_M.gguf": ["mradermacher/KunoichiLake-2x7b-GGUF", MessagesFormatterType.MISTRAL],
80
  "Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-Q4_K_M.gguf": ["bartowski/Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-GGUF", MessagesFormatterType.LLAMA_3],
@@ -908,25 +930,44 @@ llm_formats = {
908
  llm_languages = ["English", "Japanese", "Chinese", "Korean", "Spanish", "Portuguese", "German", "French", "Finnish", "Russian"]
909
  llm_models_tupled_list = []
910
  default_llm_model_filename = list(llm_models.keys())[0]
911
- override_llm_format = None
912
  device = "cuda" if torch.cuda.is_available() else "cpu"
913
 
914
 
915
- def to_list(s):
916
  return [x.strip() for x in s.split(",") if not s == ""]
917
 
918
 
919
- def list_uniq(l):
920
  return sorted(set(l), key=l.index)
921
 
922
 
923
  @wrapt_timeout_decorator.timeout(dec_timeout=3.5)
924
- def to_list_ja(s):
925
  s = re.sub(r'[、。]', ',', s)
926
  return [x.strip() for x in s.split(",") if not s == ""]
927
 
928
 
929
- def is_japanese(s):
930
  import unicodedata
931
  for ch in s:
932
  name = unicodedata.name(ch, "")
@@ -964,7 +1005,7 @@ def download_llm_models():
964
  llm_models_tupled_list.append((name, value))
965
 
966
 
967
- def download_llm_model(filename):
968
  if not filename in llm_models.keys(): return default_llm_model_filename
969
  try:
970
  hf_hub_download(repo_id = llm_models[filename][0], filename = filename, local_dir = llm_models_dir)
@@ -975,7 +1016,7 @@ def download_llm_model(filename):
975
  return filename
976
 
977
 
978
- def get_dolphin_model_info(filename):
979
  md = "None"
980
  items = llm_models.get(filename, None)
981
  if items:
@@ -983,20 +1024,18 @@ def get_dolphin_model_info(filename):
983
  return md
984
 
985
 
986
- def select_dolphin_model(filename, progress=gr.Progress(track_tqdm=True)):
987
- global override_llm_format
988
- override_llm_format = None
989
  progress(0, desc="Loading model...")
990
  value = download_llm_model(filename)
991
  progress(1, desc="Model loaded.")
992
  md = get_dolphin_model_info(filename)
993
- return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md)
994
 
995
 
996
- def select_dolphin_format(format_name):
997
- global override_llm_format
998
- override_llm_format = llm_formats[format_name]
999
- return gr.update(value=format_name)
1000
 
1001
 
1002
  download_llm_model(default_llm_model_filename)
@@ -1017,14 +1056,14 @@ def get_key_from_value(d, val):
1017
  return None
1018
 
1019
 
1020
- def get_dolphin_model_format(filename):
1021
  if not filename in llm_models.keys(): filename = default_llm_model_filename
1022
  format = llm_models[filename][1]
1023
  format_name = get_key_from_value(llm_formats, format)
1024
  return format_name
1025
 
1026
 
1027
- def add_dolphin_models(query, format_name):
1028
  global llm_models
1029
  api = HfApi()
1030
  add_models = {}
@@ -1055,8 +1094,6 @@ def add_dolphin_models(query, format_name):
1055
  return gr.update(choices=choices, value=choices[-1][1])
1056
 
1057
 
1058
- dolphin_output_language = "English"
1059
- dolphin_sysprompt_mode = "Default"
1060
  dolphin_system_prompt = {"Default": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
1061
  The message must have the following [Tags] generated in strict accordance with the following [Rules]:
1062
  ```
@@ -1247,8 +1284,11 @@ Output should be enclosed in //GENBEGIN//:// and //://GENEND//. The text to be g
1247
  "Chat with LLM": r"You are a helpful AI assistant. Respond in <LANGUAGE>."}
1248
 
1249
 
1250
- def get_dolphin_sysprompt():
1251
- prompt = re.sub('<LANGUAGE>', dolphin_output_language, dolphin_system_prompt.get(dolphin_sysprompt_mode, ""))
 
 
 
1252
  return prompt
1253
 
1254
 
@@ -1256,23 +1296,21 @@ def get_dolphin_sysprompt_mode():
1256
  return list(dolphin_system_prompt.keys())
1257
 
1258
 
1259
- def select_dolphin_sysprompt(key: str):
1260
- global dolphin_sysprompt_mode
1261
- if not key in dolphin_system_prompt.keys():
1262
- dolphin_sysprompt_mode = "Default"
1263
- else:
1264
- dolphin_sysprompt_mode = key
1265
- return gr.update(value=get_dolphin_sysprompt())
1266
 
1267
 
1268
  def get_dolphin_languages():
1269
  return llm_languages
1270
 
1271
 
1272
- def select_dolphin_language(lang: str):
1273
- global dolphin_output_language
1274
- dolphin_output_language = lang
1275
- return gr.update(value=get_dolphin_sysprompt())
1276
 
1277
 
1278
  @wrapt_timeout_decorator.timeout(dec_timeout=5.0)
@@ -1293,15 +1331,14 @@ def dolphin_respond(
1293
  top_p: float = 0.95,
1294
  top_k: int = 40,
1295
  repeat_penalty: float = 1.1,
 
1296
  progress=gr.Progress(track_tqdm=True),
1297
  ):
1298
  try:
1299
  progress(0, desc="Processing...")
1300
-
1301
- if override_llm_format:
1302
- chat_template = override_llm_format
1303
- else:
1304
- chat_template = llm_models[model][1]
1305
 
1306
  llm = Llama(
1307
  model_path=str(Path(f"{llm_models_dir}/{model}")),
@@ -1363,8 +1400,10 @@ def dolphin_respond(
1363
 
1364
  def dolphin_parse(
1365
  history: list[tuple[str, str]],
 
1366
  ):
1367
  try:
 
1368
  if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1:
1369
  return "", gr.update(), gr.update()
1370
  msg = history[-1][0]
@@ -1392,16 +1431,16 @@ def dolphin_respond_auto(
1392
  top_p: float = 0.95,
1393
  top_k: int = 40,
1394
  repeat_penalty: float = 1.1,
 
1395
  progress=gr.Progress(track_tqdm=True),
1396
  ):
1397
  try:
1398
  #if not is_japanese(message): return [(None, None)]
1399
  progress(0, desc="Processing...")
1400
 
1401
- if override_llm_format:
1402
- chat_template = override_llm_format
1403
- else:
1404
- chat_template = llm_models[model][1]
1405
 
1406
  llm = Llama(
1407
  model_path=str(Path(f"{llm_models_dir}/{model}")),
@@ -1465,9 +1504,11 @@ def dolphin_respond_auto(
1465
  def dolphin_parse_simple(
1466
  message: str,
1467
  history: list[tuple[str, str]],
 
1468
  ):
1469
  try:
1470
  #if not is_japanese(message): return message
 
1471
  if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
1472
  msg = history[-1][0]
1473
  raw_prompt = get_raw_prompt(msg)
@@ -1499,12 +1540,12 @@ def respond_playground(
1499
  top_p,
1500
  top_k,
1501
  repeat_penalty,
 
1502
  ):
1503
  try:
1504
- if override_llm_format:
1505
- chat_template = override_llm_format
1506
- else:
1507
- chat_template = llm_models[model][1]
1508
 
1509
  llm = Llama(
1510
  model_path=str(Path(f"{llm_models_dir}/{model}")),
 
3
  from pathlib import Path
4
  import re
5
  import torch
6
+ from typing import Any
7
  from huggingface_hub import hf_hub_download, HfApi
8
  from llama_cpp import Llama
9
  from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
 
76
  "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
77
  "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
78
  "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
79
+ "dolphin-2.6-mistral-7b-dpo-laser.Q4_K_S.gguf": ["mradermacher/dolphin-2.6-mistral-7b-dpo-laser-GGUF", MessagesFormatterType.MISTRAL],
80
+ "Flowable-Docs-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Flowable-Docs-Llama-3.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
81
+ "slimorca-gemma2-9b-fft.Q4_K_M.gguf": ["mradermacher/slimorca-gemma2-9b-fft-GGUF", MessagesFormatterType.ALPACA],
82
+ "TQ2.5-14B-Sugarquill-v1-Q4_K_M.gguf": ["bartowski/TQ2.5-14B-Sugarquill-v1-GGUF", MessagesFormatterType.OPEN_CHAT],
83
+ "magnum-v3-9b-customgemma2.i1-Q4_K_M.gguf": ["mradermacher/magnum-v3-9b-customgemma2-i1-GGUF", MessagesFormatterType.ALPACA],
84
+ "Captain_BMO-12B.Q4_K_M.gguf": ["mradermacher/Captain_BMO-12B-GGUF", MessagesFormatterType.MISTRAL],
85
+ "LemonP-8B-Model_Stock.i1-Q5_K_M.gguf": ["mradermacher/LemonP-8B-Model_Stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
86
+ "Ice0.31-08.11-RP.i1-Q5_K_M.gguf": ["mradermacher/Ice0.31-08.11-RP-i1-GGUF", MessagesFormatterType.MISTRAL],
87
+ "EVA-Qwen2.5-14B-v0.2.i1-Q4_K_M.gguf": ["mradermacher/EVA-Qwen2.5-14B-v0.2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
88
+ "L3.1-Dark-Planet-SpinFire-Uncensored-8B-D_AU-Q4_k_m.gguf": ["DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B-GGUF", MessagesFormatterType.LLAMA_3],
89
+ "EndlessRP-v2-7B.Q5_K_M.gguf": ["mradermacher/EndlessRP-v2-7B-GGUF", MessagesFormatterType.MISTRAL],
90
+ "badger-lambda-0-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/badger-lambda-0-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
91
+ "L3.1-Artemis-e2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-e2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
92
+ "MN-12B-Inferor-v0.0.Q4_K_M.gguf": ["mradermacher/MN-12B-Inferor-v0.0-GGUF", MessagesFormatterType.MISTRAL],
93
+ "Eclipse-13B-dpo.i1-Q4_K_M.gguf": ["mradermacher/Eclipse-13B-dpo-i1-GGUF", MessagesFormatterType.MISTRAL],
94
+ "Epic_Fiction-8b-v0.1.Q5_K_M.gguf": ["mradermacher/Epic_Fiction-8b-v0.1-GGUF", MessagesFormatterType.MISTRAL],
95
+ "Llama-3-8B-StoryGenerator.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-8B-StoryGenerator-i1-GGUF", MessagesFormatterType.LLAMA_3],
96
+ "badger-mu-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/badger-mu-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
97
+ "badger-writer-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/badger-writer-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
98
+ "Qwen-Rui-SE.Q5_K_M.gguf": ["mradermacher/Qwen-Rui-SE-GGUF", MessagesFormatterType.OPEN_CHAT],
99
+ "Valor-7B-v0.1.i1-Q4_K_M.gguf": ["mradermacher/Valor-7B-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
100
  "Fraken-Maid-TW-Slerp.i1-Q5_K_M.gguf": ["mradermacher/Fraken-Maid-TW-Slerp-i1-GGUF", MessagesFormatterType.MISTRAL],
101
  "KunoichiLake-2x7b.Q4_K_M.gguf": ["mradermacher/KunoichiLake-2x7b-GGUF", MessagesFormatterType.MISTRAL],
102
  "Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-Q4_K_M.gguf": ["bartowski/Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-GGUF", MessagesFormatterType.LLAMA_3],
 
930
  llm_languages = ["English", "Japanese", "Chinese", "Korean", "Spanish", "Portuguese", "German", "French", "Finnish", "Russian"]
931
  llm_models_tupled_list = []
932
  default_llm_model_filename = list(llm_models.keys())[0]
 
933
  device = "cuda" if torch.cuda.is_available() else "cpu"
934
 
935
 
936
+ def to_list(s: str):
937
  return [x.strip() for x in s.split(",") if not s == ""]
938
 
939
 
940
+ def list_uniq(l: list):
941
  return sorted(set(l), key=l.index)
942
 
943
 
944
+ DEFAULT_STATE = {
945
+ "dolphin_sysprompt_mode": "Default",
946
+ "dolphin_output_language": llm_languages[0],
947
+ }
948
+
949
+
950
+ def get_state(state: dict, key: str):
951
+ if key in state.keys(): return state[key]
952
+ elif key in DEFAULT_STATE.keys():
953
+ print(f"State '{key}' not found. Use default value.")
954
+ return DEFAULT_STATE[key]
955
+ else:
956
+ print(f"State '{key}' not found.")
957
+ return None
958
+
959
+
960
+ def set_state(state: dict, key: str, value: Any):
961
+ state[key] = value
962
+
963
+
964
  @wrapt_timeout_decorator.timeout(dec_timeout=3.5)
965
+ def to_list_ja(s: str):
966
  s = re.sub(r'[、。]', ',', s)
967
  return [x.strip() for x in s.split(",") if not s == ""]
968
 
969
 
970
+ def is_japanese(s: str):
971
  import unicodedata
972
  for ch in s:
973
  name = unicodedata.name(ch, "")
 
1005
  llm_models_tupled_list.append((name, value))
1006
 
1007
 
1008
+ def download_llm_model(filename: str):
1009
  if not filename in llm_models.keys(): return default_llm_model_filename
1010
  try:
1011
  hf_hub_download(repo_id = llm_models[filename][0], filename = filename, local_dir = llm_models_dir)
 
1016
  return filename
1017
 
1018
 
1019
+ def get_dolphin_model_info(filename: str):
1020
  md = "None"
1021
  items = llm_models.get(filename, None)
1022
  if items:
 
1024
  return md
1025
 
1026
 
1027
+ def select_dolphin_model(filename: str, state: dict, progress=gr.Progress(track_tqdm=True)):
1028
+ set_state(state, "override_llm_format", None)
 
1029
  progress(0, desc="Loading model...")
1030
  value = download_llm_model(filename)
1031
  progress(1, desc="Model loaded.")
1032
  md = get_dolphin_model_info(filename)
1033
+ return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md), state
1034
 
1035
 
1036
+ def select_dolphin_format(format_name: str, state: dict):
1037
+ set_state(state, "override_llm_format", llm_formats[format_name])
1038
+ return gr.update(value=format_name), state
 
1039
 
1040
 
1041
  download_llm_model(default_llm_model_filename)
 
1056
  return None
1057
 
1058
 
1059
+ def get_dolphin_model_format(filename: str):
1060
  if not filename in llm_models.keys(): filename = default_llm_model_filename
1061
  format = llm_models[filename][1]
1062
  format_name = get_key_from_value(llm_formats, format)
1063
  return format_name
1064
 
1065
 
1066
+ def add_dolphin_models(query: str, format_name: str):
1067
  global llm_models
1068
  api = HfApi()
1069
  add_models = {}
 
1094
  return gr.update(choices=choices, value=choices[-1][1])
1095
 
1096
 
 
 
1097
  dolphin_system_prompt = {"Default": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
1098
  The message must have the following [Tags] generated in strict accordance with the following [Rules]:
1099
  ```
 
1284
  "Chat with LLM": r"You are a helpful AI assistant. Respond in <LANGUAGE>."}
1285
 
1286
 
1287
+ def get_dolphin_sysprompt(state: dict={}):
1288
+ dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
1289
+ dolphin_output_language = get_state(state, "dolphin_output_language")
1290
+ prompt = re.sub('<LANGUAGE>', dolphin_output_language if dolphin_output_language else llm_languages[0],
1291
+ dolphin_system_prompt.get(dolphin_sysprompt_mode, dolphin_system_prompt[list(dolphin_system_prompt.keys())[0]]))
1292
  return prompt
1293
 
1294
 
 
1296
  return list(dolphin_system_prompt.keys())
1297
 
1298
 
1299
+ def select_dolphin_sysprompt(key: str, state: dict):
1300
+ dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
1301
+ if not key in dolphin_system_prompt.keys(): dolphin_sysprompt_mode = "Default"
1302
+ else: dolphin_sysprompt_mode = key
1303
+ set_state(state, "dolphin_sysprompt_mode", dolphin_sysprompt_mode)
1304
+ return gr.update(value=get_dolphin_sysprompt(state)), state
 
1305
 
1306
 
1307
  def get_dolphin_languages():
1308
  return llm_languages
1309
 
1310
 
1311
+ def select_dolphin_language(lang: str, state: dict):
1312
+ set_state(state, "dolphin_output_language", lang)
1313
+ return gr.update(value=get_dolphin_sysprompt(state)), state
 
1314
 
1315
 
1316
  @wrapt_timeout_decorator.timeout(dec_timeout=5.0)
 
1331
  top_p: float = 0.95,
1332
  top_k: int = 40,
1333
  repeat_penalty: float = 1.1,
1334
+ state: dict = {},
1335
  progress=gr.Progress(track_tqdm=True),
1336
  ):
1337
  try:
1338
  progress(0, desc="Processing...")
1339
+ override_llm_format = get_state(state, "override_llm_format")
1340
+ if override_llm_format: chat_template = override_llm_format
1341
+ else: chat_template = llm_models[model][1]
 
 
1342
 
1343
  llm = Llama(
1344
  model_path=str(Path(f"{llm_models_dir}/{model}")),
 
1400
 
1401
  def dolphin_parse(
1402
  history: list[tuple[str, str]],
1403
+ state: dict,
1404
  ):
1405
  try:
1406
+ dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
1407
  if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1:
1408
  return "", gr.update(), gr.update()
1409
  msg = history[-1][0]
 
1431
  top_p: float = 0.95,
1432
  top_k: int = 40,
1433
  repeat_penalty: float = 1.1,
1434
+ state: dict = {},
1435
  progress=gr.Progress(track_tqdm=True),
1436
  ):
1437
  try:
1438
  #if not is_japanese(message): return [(None, None)]
1439
  progress(0, desc="Processing...")
1440
 
1441
+ override_llm_format = get_state(state, "override_llm_format")
1442
+ if override_llm_format: chat_template = override_llm_format
1443
+ else: chat_template = llm_models[model][1]
 
1444
 
1445
  llm = Llama(
1446
  model_path=str(Path(f"{llm_models_dir}/{model}")),
 
1504
  def dolphin_parse_simple(
1505
  message: str,
1506
  history: list[tuple[str, str]],
1507
+ state: dict,
1508
  ):
1509
  try:
1510
  #if not is_japanese(message): return message
1511
+ dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
1512
  if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
1513
  msg = history[-1][0]
1514
  raw_prompt = get_raw_prompt(msg)
 
1540
  top_p,
1541
  top_k,
1542
  repeat_penalty,
1543
+ state,
1544
  ):
1545
  try:
1546
+ override_llm_format = get_state(state, "override_llm_format")
1547
+ if override_llm_format: chat_template = override_llm_format
1548
+ else: chat_template = llm_models[model][1]
 
1549
 
1550
  llm = Llama(
1551
  model_path=str(Path(f"{llm_models_dir}/{model}")),
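
The llmdolphin.py changes above retire the `override_llm_format`, `dolphin_sysprompt_mode`, and `dolphin_output_language` globals in favor of the same session dict, accessed through `get_state`/`set_state` with a `DEFAULT_STATE` fallback, and append `state` to the chat handlers' signatures. A standalone walk-through of how those helpers behave (the `Llama`/`llama_cpp_agent` plumbing is omitted and the format value is just a stand-in):

```python
from typing import Any

DEFAULT_STATE = {
    "dolphin_sysprompt_mode": "Default",
    "dolphin_output_language": "English",
}

def get_state(state: dict, key: str) -> Any:
    if key in state:
        return state[key]
    if key in DEFAULT_STATE:
        return DEFAULT_STATE[key]   # fall back to the shipped default
    return None                     # unknown key, e.g. override_llm_format before it is set

def set_state(state: dict, key: str, value: Any) -> None:
    state[key] = value

session = {}  # what gr.State(value={}) hands to each handler
assert get_state(session, "dolphin_sysprompt_mode") == "Default"
assert get_state(session, "override_llm_format") is None   # so the model's own format is used
set_state(session, "override_llm_format", "MISTRAL")
assert get_state(session, "override_llm_format") == "MISTRAL"
```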
modutils.py CHANGED
@@ -12,6 +12,7 @@ from requests.adapters import HTTPAdapter
12
  from urllib3.util import Retry
13
  import urllib.parse
14
  import pandas as pd
 
15
  from huggingface_hub import HfApi, HfFolder, hf_hub_download, snapshot_download
16
  from translatepy import Translator
17
  from unidecode import unidecode
@@ -52,6 +53,25 @@ def is_repo_name(s):
52
  return re.fullmatch(r'^[^/]+?/[^/]+?$', s)
53
 
54
 
55
  translator = Translator()
56
  def translate_to_en(input: str):
57
  try:
@@ -753,10 +773,10 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
753
  key = result[0][0]
754
  wt = result[0][1]
755
  path = to_lora_path(key)
756
- if not key in loras_dict.keys() or not path:
757
  path = get_valid_lora_name(path)
758
  if not path or path == "None": continue
759
- if path in lora_paths:
760
  continue
761
  elif not on1:
762
  lora1 = path
@@ -777,7 +797,7 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
777
  lora4 = path
778
  lora_paths = [lora1, lora2, lora3, lora4, lora5]
779
  lora4_wt = safe_float(wt)
780
- on4, label4, tag4, md4 = get_lora_info(lora4)
781
  elif not on5:
782
  lora5 = path
783
  lora_paths = [lora1, lora2, lora3, lora4, lora5]
 
12
  from urllib3.util import Retry
13
  import urllib.parse
14
  import pandas as pd
15
+ from typing import Any
16
  from huggingface_hub import HfApi, HfFolder, hf_hub_download, snapshot_download
17
  from translatepy import Translator
18
  from unidecode import unidecode
 
53
  return re.fullmatch(r'^[^/]+?/[^/]+?$', s)
54
 
55
 
56
+ DEFAULT_STATE = {
57
+ "show_diffusers_model_list_detail": False,
58
+ }
59
+
60
+
61
+ def get_state(state: dict, key: str):
62
+ if key in state.keys(): return state[key]
63
+ elif key in DEFAULT_STATE.keys():
64
+ print(f"State '{key}' not found. Use default value.")
65
+ return DEFAULT_STATE[key]
66
+ else:
67
+ print(f"State '{key}' not found.")
68
+ return None
69
+
70
+
71
+ def set_state(state: dict, key: str, value: Any):
72
+ state[key] = value
73
+
74
+
75
  translator = Translator()
76
  def translate_to_en(input: str):
77
  try:
 
773
  key = result[0][0]
774
  wt = result[0][1]
775
  path = to_lora_path(key)
776
+ if not key in loras_dict.keys() or not Path(path).exists():
777
  path = get_valid_lora_name(path)
778
  if not path or path == "None": continue
779
+ if path in lora_paths or key in lora_paths:
780
  continue
781
  elif not on1:
782
  lora1 = path
 
797
  lora4 = path
798
  lora_paths = [lora1, lora2, lora3, lora4, lora5]
799
  lora4_wt = safe_float(wt)
800
+ on4 = True
801
  elif not on5:
802
  lora5 = path
803
  lora_paths = [lora1, lora2, lora3, lora4, lora5]
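
modutils.py gains its own copy of the `get_state`/`set_state` pair (with `show_diffusers_model_list_detail` as the default key) and tightens prompt-LoRA resolution: a parsed key is only accepted when its file exists on disk, duplicates are skipped by path or by key, and slot four now simply sets `on4 = True`. A rough sketch of the existence-plus-dedup check, with an assumed directory layout and hypothetical helper names:

```python
from pathlib import Path

LORA_DIR = Path("./loras")   # assumed download location for LoRA files

def to_lora_path(key: str) -> str:
    return str(LORA_DIR / f"{key}.safetensors")

def resolve_lora(key: str, already_used: list[str]) -> str | None:
    path = to_lora_path(key)
    if not Path(path).exists():                  # mirrors the Path(path).exists() guard above
        return None
    if path in already_used or key in already_used:
        return None                              # LoRA already bound to another slot
    return path
```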
tagger/tagger.py CHANGED
@@ -285,9 +285,6 @@ def convert_tags_to_ja(input_prompt: str = ""):
285
  return ", ".join(out_tags)
286
 
287
 
288
- enable_auto_recom_prompt = True
289
-
290
-
291
  animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
292
  animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
293
  pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
@@ -297,7 +294,6 @@ other_nps = to_list("photo, deformed, black and white, realism, disfigured, low
297
  default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
298
  default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
299
  def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
300
- global enable_auto_recom_prompt
301
  prompts = to_list(prompt)
302
  neg_prompts = to_list(neg_prompt)
303
 
@@ -307,16 +303,12 @@ def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "Non
307
  last_empty_p = [""] if not prompts and type != "None" else []
308
  last_empty_np = [""] if not neg_prompts and type != "None" else []
309
 
310
- if type == "Auto":
311
- enable_auto_recom_prompt = True
312
- else:
313
- enable_auto_recom_prompt = False
314
- if type == "Animagine":
315
- prompts = prompts + animagine_ps
316
- neg_prompts = neg_prompts + animagine_nps
317
- elif type == "Pony":
318
- prompts = prompts + pony_ps
319
- neg_prompts = neg_prompts + pony_nps
320
 
321
  prompt = ", ".join(list_uniq(prompts) + last_empty_p)
322
  neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
@@ -329,7 +321,7 @@ def load_model_prompt_dict():
329
  dict = {}
330
  path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
331
  try:
332
- with open('model_dict.json', encoding='utf-8') as f:
333
  dict = json.load(f)
334
  except Exception:
335
  pass
@@ -339,7 +331,8 @@ def load_model_prompt_dict():
339
  model_prompt_dict = load_model_prompt_dict()
340
 
341
 
342
- def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
 
343
  if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
344
  prompts = to_list(prompt)
345
  neg_prompts = to_list(neg_prompt)
 
285
  return ", ".join(out_tags)
286
 
287
 
 
 
 
288
  animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
289
  animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
290
  pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
 
294
  default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
295
  default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
296
  def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
 
297
  prompts = to_list(prompt)
298
  neg_prompts = to_list(neg_prompt)
299
 
 
303
  last_empty_p = [""] if not prompts and type != "None" else []
304
  last_empty_np = [""] if not neg_prompts and type != "None" else []
305
 
306
+ if type == "Animagine":
307
+ prompts = prompts + animagine_ps
308
+ neg_prompts = neg_prompts + animagine_nps
309
+ elif type == "Pony":
310
+ prompts = prompts + pony_ps
311
+ neg_prompts = neg_prompts + pony_nps
 
 
 
 
312
 
313
  prompt = ", ".join(list_uniq(prompts) + last_empty_p)
314
  neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
 
321
  dict = {}
322
  path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
323
  try:
324
+ with open(path, encoding='utf-8') as f:
325
  dict = json.load(f)
326
  except Exception:
327
  pass
 
331
  model_prompt_dict = load_model_prompt_dict()
332
 
333
 
334
+ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", type = "Auto"):
335
+ enable_auto_recom_prompt = True if type == "Auto" else False
336
  if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
337
  prompts = to_list(prompt)
338
  neg_prompts = to_list(neg_prompt)
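
Finally, tagger/tagger.py drops the `enable_auto_recom_prompt` global: `insert_recom_prompt` no longer flips it, and `insert_model_recom_prompt` derives it from a new `type` argument ("Auto" enables the insertion). The diff also makes `load_model_prompt_dict` open the path it actually resolved rather than the hard-coded filename. A small sketch of that resolved-path fix:

```python
import json
from pathlib import Path

def load_model_prompt_dict(primary: str = "model_dict.json",
                           fallback: str = "./tagger/model_dict.json") -> dict:
    # open whichever path was resolved, not the hard-coded primary name
    path = primary if Path(primary).exists() else fallback
    try:
        with open(path, encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        return {}
```

Before the fix the fallback path was computed but never used, so running the tagger from a directory without `model_dict.json` silently yielded an empty dict.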