Waseem7711 committed on
Commit
44769ce
·
verified ·
1 Parent(s): 13ed197

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -53
app.py CHANGED
@@ -1,72 +1,53 @@
1
- # prompt: write this notebook code for me i want to deploy this model on hugging space using streamlit
 
 
 
2
 
3
- import streamlit as st
4
  import mediapy as media
5
  import random
6
  import sys
7
  import torch
8
  from diffusers import DiffusionPipeline
 
 
 
9
 
10
  # Install necessary libraries if not already installed
11
  try:
12
  import diffusers
 
 
 
 
 
 
13
  except ImportError:
14
- !pip install --quiet --upgrade diffusers transformers accelerate invisible_watermark mediapy
15
 
16
- # Set use_refiner to False for initial deployment. You can add a checkbox later.
17
  use_refiner = False
18
 
19
- @st.cache_resource # Cache the pipeline to avoid reloading every time
20
- def load_pipeline():
21
- pipe = DiffusionPipeline.from_pretrained(
22
- "stabilityai/stable-diffusion-xl-base-1.0",
23
- torch_dtype=torch.float16,
24
- use_safetensors=True,
25
- variant="fp16",
26
- )
27
-
28
- if use_refiner: # Keep the refiner logic but disabled initially
29
- refiner = DiffusionPipeline.from_pretrained(
30
- "stabilityai/stable-diffusion-xl-refiner-1.0",
31
- text_encoder_2=pipe.text_encoder_2,
32
- vae=pipe.vae,
33
- torch_dtype=torch.float16,
34
- use_safetensors=True,
35
- variant="fp16",
36
- )
37
- refiner = refiner.to("cuda")
38
- pipe.enable_model_cpu_offload()
39
- else:
40
- pipe = pipe.to("cuda")
41
- return pipe
42
 
43
- # Load the pipeline
44
- pipe = load_pipeline()
45
-
46
- st.title("Stable Diffusion XL Image Generator")
47
-
48
- # Input text field for the prompt
49
- prompt = st.text_input("Enter your prompt:", "a photo of Pikachu fine dining with a view to the Eiffel Tower")
50
 
51
- # Seed input (optional)
52
- seed = st.number_input("Enter a seed (optional):", min_value=0, value=random.randint(0, sys.maxsize))
 
 
 
 
 
 
 
53
 
54
- if st.button("Generate Image"):
55
- if not prompt:
56
- st.warning("Please enter a prompt.")
57
- else:
58
- with st.spinner("Generating image..."):
59
- images = pipe(
60
- prompt=prompt,
61
- output_type="latent" if use_refiner else "pil",
62
- generator=torch.Generator("cuda").manual_seed(seed),
63
- ).images
64
- if use_refiner: # Condition still here, but the refiner is not used
65
- images = refiner(prompt=prompt, image=images).images
66
- st.write(f"Prompt:\t{prompt}\nSeed:\t{seed}")
67
- st.image(images[0]) #Display in Streamlit
68
- # You might want to add a download button here
69
 
70
- # Add a checkbox for the refiner
71
- #use_refiner = st.checkbox("Use Refiner (Experimental)", value=False)
 
72
 
 
 
1
"""Streamlit app that generates images with Stable Diffusion XL.

Loads the SDXL base pipeline at module import time; the refiner is wired
up but disabled via ``use_refiner``.

Fix for the deployment error quoted in the original commit:
``!pip install ...`` is IPython/Jupyter shell magic, not Python syntax,
so running the file as a plain script (as a Hugging Face Space does)
raised ``SyntaxError: invalid syntax``.  The previous "fix" only moved
the ``!pip`` line into the ``except ImportError`` branch, where it is
still a syntax error.  We install via ``subprocess`` + ``pip`` instead,
and re-import afterwards so the names actually exist on the retry path.
"""

import random
import subprocess
import sys

try:
    # Third-party imports; installed on demand in the except branch.
    import accelerate
    import diffusers
    import invisible_watermark
    import mediapy as media
    import streamlit as st
    import torch
    import transformers
    from diffusers import DiffusionPipeline
except ImportError:
    # ``!pip install ...`` only works inside IPython/Jupyter.  In a plain
    # Python script we must shell out to pip through the interpreter.
    subprocess.check_call([
        sys.executable, "-m", "pip", "install", "--quiet", "--upgrade",
        "diffusers", "transformers", "accelerate", "invisible_watermark",
        "mediapy", "streamlit", "torch",
    ])
    # Retry the imports now that the packages are installed; the original
    # code skipped this, leaving the names undefined after installation.
    import accelerate
    import diffusers
    import invisible_watermark
    import mediapy as media
    import streamlit as st
    import torch
    import transformers
    from diffusers import DiffusionPipeline

# Refiner disabled for the initial deployment; flip to True to enable it.
use_refiner = False

# Load the SDXL base pipeline once at module import time.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)

if use_refiner:
    refiner = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        text_encoder_2=pipe.text_encoder_2,  # share encoders with the base
        vae=pipe.vae,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    )
    refiner = refiner.to("cuda")
    # Offload base-model submodules to CPU so base + refiner fit in VRAM.
    pipe.enable_model_cpu_offload()
else:
    pipe = pipe.to("cuda")  # NOTE(review): assumes a CUDA GPU is available

# ... (rest of your code)