SunderAli17 committed 8e6a39d (verified) · 1 parent: e20ea84

Update app.py

Files changed (1):
  1. app.py +13 -14
app.py CHANGED
@@ -12,18 +12,17 @@ import spaces
 import torch
 from diffusers import AutoencoderKL, DiffusionPipeline
 
-DESCRIPTION = """
-# OpenDalle 1.1
-**Demo by [mrfakename](https://mrfake.name/) - [Twitter](https://twitter.com/realmrfakename) - [GitHub](https://github.com/fakerybakery/) - [Hugging Face](https://huggingface.co/mrfakename)**
-This is a demo of <a href="https://huggingface.co/dataautogpt3/OpenDalleV1.1">OpenDalle V1.1</a> by @dataautogpt3.
-It's a merge of several different models and is supposed to provide excellent performance. Try it out!
-[Not Working?](https://huggingface.co/spaces/mrfakename/OpenDalleV1.1-GPU-Demo/discussions/4)
-**The code for this demo is based on [@hysts's SD-XL demo](https://huggingface.co/spaces/hysts/SD-XL) running on a A10G GPU.**
-**NOTE: The model is licensed under a non-commercial license**
-Also see [OpenDalle Original Demo](https://huggingface.co/spaces/mrfakename/OpenDalle-GPU-Demo/)
+MARKDOWN = """
+This demo utilizes <a href="https://huggingface.co/dataautogpt3/OpenDalleV1.1">OpenDalle V1.1</a> by @dataautogpt3.
+It merges several different models with the aim of producing better-looking results.
+Try it with different prompts and share your feedback.
+
+**Parts of the code are borrowed from [@hysts's SD-XL demo](https://huggingface.co/spaces/hysts/SD-XL), which runs on an A10G GPU.**
+
+**Demo by [Sunder Ali Khowaja](https://sander-ali.github.io) - [X](https://x.com/SunderAKhowaja) - [Github](https://github.com/sander-ali) - [Hugging Face](https://huggingface.co/SunderAli17)**
 """
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<h1>Running on CPU 🥶 This demo does not work on CPU. Please use <a href=\"https://huggingface.co/spaces/mrfakename/OpenDalleV1.1-GPU-Demo\">the online demo</a> instead</h1>"
+    MARKDOWN += "\n<h1>The demo will not work on CPU</h1>"
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
@@ -73,7 +72,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 
 
 @spaces.GPU
-def generate(
+def infer(
     prompt: str,
     negative_prompt: str = "",
     prompt_2: str = "",
@@ -149,7 +148,7 @@ theme = gr.themes.Base(
     font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
 )
 with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
-    gr.Markdown(DESCRIPTION)
+    gr.Markdown(MARKDOWN)
     gr.DuplicateButton(
         value="Duplicate Space for private use",
         elem_id="duplicate-button",
@@ -248,7 +247,7 @@ with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
         examples=examples,
         inputs=prompt,
         outputs=result,
-        fn=generate,
+        fn=infer,
         cache_examples=CACHE_EXAMPLES,
     )
 
@@ -295,7 +294,7 @@ with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
         queue=False,
         api_name=False,
     ).then(
-        fn=generate,
+        fn=infer,
         inputs=[
             prompt,
             negative_prompt,
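
For orientation, here is a minimal sketch of the pattern the updated app.py follows after this commit: a module-level MARKDOWN string that picks up a CPU warning when no CUDA device is present, and a single @spaces.GPU-decorated infer function that every Gradio callback references. This is an assumption-laden simplification, not the Space's actual code: the UI is trimmed to one textbox, one image, and one button, and the infer body is a stand-in placeholder rather than the real diffusers pipeline.

```python
import os

import gradio as gr
import numpy as np
import spaces
import torch
from PIL import Image

# Same pattern as the commit: build the page text once, then append a
# warning when the Space has no CUDA device.
MARKDOWN = """
This demo utilizes <a href="https://huggingface.co/dataautogpt3/OpenDalleV1.1">OpenDalle V1.1</a> by @dataautogpt3.
"""
if not torch.cuda.is_available():
    MARKDOWN += "\n<h1>The demo will not work on CPU</h1>"

MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"


@spaces.GPU  # on ZeroGPU Spaces, a GPU is attached only while infer() runs
def infer(prompt: str, negative_prompt: str = "") -> Image.Image:
    # Stand-in body: the real app runs a diffusers DiffusionPipeline here.
    return Image.new("RGB", (512, 512), "gray")


with gr.Blocks() as demo:
    gr.Markdown(MARKDOWN)  # renamed from DESCRIPTION in this commit
    prompt = gr.Textbox(label="Prompt")
    result = gr.Image(label="Result")
    run = gr.Button("Run")
    # Every callback that previously pointed at generate now points at infer.
    run.click(fn=infer, inputs=prompt, outputs=result)

if __name__ == "__main__":
    demo.launch()
```

The generate-to-infer rename is purely mechanical: the function signature and the callback wiring are unchanged apart from the identifier, so both the gr.Examples block and the .then() chain only needed their fn= argument updated.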