prithivMLmods committed on
Commit
eb85b5c
·
verified ·
1 Parent(s): ac5130b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -25
app.py CHANGED
@@ -13,7 +13,7 @@ import torch
13
  from diffusers import DiffusionPipeline
14
  from typing import Tuple
15
 
16
- #Check for the Model Base..//
17
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
18
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
19
  default_negative = os.getenv("default_negative","")
@@ -26,11 +26,9 @@ def check_text(prompt, negative=""):
26
  if i in negative:
27
  return True
28
  return False
29
- #End of the - Prompt Con
30
-
31
 
32
  style_list = [
33
-
34
  {
35
  "name": "3840 x 2160",
36
  "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
@@ -58,18 +56,9 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
58
  negative = ""
59
  return p.replace("{prompt}", positive), n + negative
60
 
61
-
62
-
63
-
64
-
65
  DESCRIPTION = """## MidJourney 3D
66
-
67
  """
68
 
69
-
70
-
71
-
72
-
73
  if not torch.cuda.is_available():
74
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
75
 
@@ -85,13 +74,6 @@ NUM_IMAGES_PER_PROMPT = 1
85
 
86
  if torch.cuda.is_available():
87
  pipe = DiffusionPipeline.from_pretrained(
88
- "yodayo-ai/kivotos-xl-2.0",
89
- torch_dtype=torch.float16,
90
- use_safetensors=True,
91
- add_watermarker=False,
92
- variant="fp16"
93
- )
94
- pipe2 = DiffusionPipeline.from_pretrained(
95
  "Yntec/3Danimation",
96
  torch_dtype=torch.float16,
97
  use_safetensors=True,
@@ -100,15 +82,12 @@ if torch.cuda.is_available():
100
  )
101
  if ENABLE_CPU_OFFLOAD:
102
  pipe.enable_model_cpu_offload()
103
- pipe2.enable_model_cpu_offload()
104
  else:
105
  pipe.to(device)
106
- pipe2.to(device)
107
  print("Loaded on Device!")
108
 
109
  if USE_TORCH_COMPILE:
110
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
111
- pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
112
  print("Model Compiled!")
113
 
114
  def save_image(img):
@@ -159,7 +138,7 @@ def generate(
159
  "output_type": "pil",
160
  }
161
 
162
- images = pipe(**options).images + pipe2(**options).images
163
 
164
  image_paths = [save_image(img) for img in images]
165
  return image_paths, seed
@@ -297,4 +276,4 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
297
  )
298
 
299
  if __name__ == "__main__":
300
- demo.queue(max_size=20).launch()
 
13
  from diffusers import DiffusionPipeline
14
  from typing import Tuple
15
 
16
+ # Check for the Model Base..//
17
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
18
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
19
  default_negative = os.getenv("default_negative","")
 
26
  if i in negative:
27
  return True
28
  return False
29
+ # End of the - Prompt Con
 
30
 
31
  style_list = [
 
32
  {
33
  "name": "3840 x 2160",
34
  "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
 
56
  negative = ""
57
  return p.replace("{prompt}", positive), n + negative
58
 
 
 
 
 
59
  DESCRIPTION = """## MidJourney 3D
 
60
  """
61
 
 
 
 
 
62
  if not torch.cuda.is_available():
63
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
64
 
 
74
 
75
  if torch.cuda.is_available():
76
  pipe = DiffusionPipeline.from_pretrained(
 
 
 
 
 
 
 
77
  "Yntec/3Danimation",
78
  torch_dtype=torch.float16,
79
  use_safetensors=True,
 
82
  )
83
  if ENABLE_CPU_OFFLOAD:
84
  pipe.enable_model_cpu_offload()
 
85
  else:
86
  pipe.to(device)
 
87
  print("Loaded on Device!")
88
 
89
  if USE_TORCH_COMPILE:
90
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 
91
  print("Model Compiled!")
92
 
93
  def save_image(img):
 
138
  "output_type": "pil",
139
  }
140
 
141
+ images = pipe(**options).images
142
 
143
  image_paths = [save_image(img) for img in images]
144
  return image_paths, seed
 
276
  )
277
 
278
  if __name__ == "__main__":
279
+ demo.queue(max_size=50).launch()