JoPmt committed
Commit 1c3e569
Parent: c8ec1aa

Update app.py

Files changed (1): app.py (+6, -11)
app.py CHANGED
@@ -8,6 +8,8 @@ from accelerate import Accelerator
 
 accelerator = Accelerator()
 models =[
+"prompthero/midjourney-v4-diffusion",
+"nitrosocke/classic-anim-diffusion",
 "stablediffusionapi/disney-pixal-cartoon",
 "stablediffusionapi/edge-of-realism",
 "sd-dreambooth-library/original-character-cyclps",
@@ -46,7 +48,6 @@ models =[
 "digiplay/AIGEN_v1.4_diffusers",
 "stablediffusionapi/dreamshaper-v6",
 "axolotron/ice-cream-animals",
-"FFusion/FFXL400",
 "TheLastBen/froggy-style-v21-768",
 "FloydianSound/Nixeu_Diffusion_v1-5",
 "digiplay/PotoPhotoRealism_v1",
@@ -55,15 +56,9 @@ models =[
 def plex(prompt,modil):
     pipe = accelerator.prepare(AutoPipelineForText2Image.from_pretrained(""+modil+"", torch_dtype=torch.float32))
     pipe = accelerator.prepare(pipe.to("cpu"))
-    # prompt = "A fantasy landscape, Cinematic lighting"
-    # negative_prompt = "low quality, bad quality"
-
-    #rmage = load_image(goof)
-    #original_image = rmage.convert("RGB")
-    #original_image.thumbnail((512, 512))
-
-    image = pipe(prompt=prompt, num_inference_steps=5).images[0]
+    image = pipe(prompt=prompt, num_inference_steps=10).images[0]
     return image
 
-iface = gr.Interface(fn=plex,inputs=[gr.Textbox(label="Prompt"), gr.Dropdown(choices=models, type="value", value=models[random.randint(1, len(models))])],outputs=gr.Image(),title="AutoPipelineForText2Image_SD_Multi",description="AutoPipelineForText2Image_SD_Multi")
-iface.launch()
+iface = gr.Interface(fn=plex,outputs=gr.Image(label="Generated Output Image"),inputs=[gr.Dropdown(choices=models, type="value", value=models[random.randint(1, len(models))]), gr.Textbox(label="Prompt"), gr.Textbox(label="negative_prompt", value="low quality, bad quality")],title="AutoPipelineForText2Image_SD_Multi",description="AutoPipelineForText2Image_SD_Multi")
+iface.queue(max_size=1)
+iface.launch(max_threads=1)
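
The updated Interface wires three inputs (a model Dropdown plus Prompt and negative_prompt Textboxes) to plex, but the plex shown in the unchanged context lines still accepts only (prompt, modil) and never forwards a negative prompt to the pipeline; the default value models[random.randint(1, len(models))] also skips index 0 and raises IndexError whenever randint returns len(models). Below is a minimal sketch of one way the callback could be lined up with the new inputs. The three-argument plex signature, passing modil directly to from_pretrained, forwarding negative_prompt, and the random.choice default are assumptions of this sketch, not part of the commit.

import random

import gradio as gr
import torch
from accelerate import Accelerator
from diffusers import AutoPipelineForText2Image

accelerator = Accelerator()
models = [
    "prompthero/midjourney-v4-diffusion",
    "nitrosocke/classic-anim-diffusion",
    "stablediffusionapi/disney-pixal-cartoon",
    # ... remaining checkpoints from app.py ...
]

def plex(modil, prompt, negative_prompt):
    # Load the selected checkpoint in float32 and keep it on CPU, as in app.py.
    pipe = AutoPipelineForText2Image.from_pretrained(modil, torch_dtype=torch.float32)
    pipe = accelerator.prepare(pipe.to("cpu"))
    # Forward the negative prompt so the third textbox actually affects generation.
    image = pipe(prompt=prompt, negative_prompt=negative_prompt,
                 num_inference_steps=10).images[0]
    return image

iface = gr.Interface(
    fn=plex,
    # Input order matches the plex(modil, prompt, negative_prompt) signature above.
    inputs=[
        # random.choice picks any entry and cannot go out of range.
        gr.Dropdown(choices=models, type="value", value=random.choice(models)),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="negative_prompt", value="low quality, bad quality"),
    ],
    outputs=gr.Image(label="Generated Output Image"),
    title="AutoPipelineForText2Image_SD_Multi",
    description="AutoPipelineForText2Image_SD_Multi",
)
iface.queue(max_size=1)
iface.launch(max_threads=1)

Keeping num_inference_steps=10 and float32 on CPU matches the commit; the only behavioral addition in the sketch is that the negative prompt actually reaches the pipeline call.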