Jeff850 committed
Commit
11419c0
1 Parent(s): 53b1aea

Update app.py

Files changed (1)
  1. app.py +16 -12
app.py CHANGED
@@ -3,13 +3,17 @@ import numpy as np
 import random
 import spaces
 import torch
-from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
-from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
+from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
 
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(device)
+# Include your Hugging Face access token
+hf_token = "waffles"
+
+# Load the diffusion pipeline with the access token
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, use_auth_token=hf_token).to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
@@ -20,11 +24,11 @@ def infer(prompt, seed=42, randomize_seed=False, width=512, height=512, guidance
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
     image = pipe(
-        prompt = prompt,
-        width = width,
-        height = height,
-        num_inference_steps = num_inference_steps,
-        generator = generator,
+        prompt=prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
         guidance_scale=guidance_scale
     ).images[0]
     return image, seed
@@ -83,7 +87,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,
+                    value=512,
                 )
 
                 height = gr.Slider(
@@ -91,7 +95,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,
+                    value=512,
                 )
 
             with gr.Row():
@@ -101,7 +105,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=15,
                     step=0.1,
-                    value=3.5,
+                    value=5.0,
                 )
 
                 num_inference_steps = gr.Slider(
@@ -127,4 +131,4 @@ with gr.Blocks(css=css) as demo:
         outputs = [result, seed]
     )
 
-demo.launch()
+demo.launch()
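
For readers adapting this change: the commit hardcodes the access token as a string literal in app.py. Below is a minimal sketch of an equivalent setup that reads the token from an environment variable instead; the HF_TOKEN variable name is an assumption, not part of the commit, and recent diffusers releases accept token= in place of the older use_auth_token= argument used here.

import os

import torch
from diffusers import DiffusionPipeline

# Sketch only: read the access token from the environment rather than
# hardcoding it in app.py. The HF_TOKEN variable name is an assumption.
hf_token = os.environ.get("HF_TOKEN")

device = "cuda" if torch.cuda.is_available() else "cpu"

# use_auth_token mirrors the commit; newer diffusers versions accept token= instead.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
    use_auth_token=hf_token,
).to(device)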
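
Once a pipe object like the one above is loaded, the keyword-argument call that the commit switches to inside infer can be exercised directly. A brief usage sketch with a reproducible seed follows; the prompt string and step count are placeholders and do not come from this diff.

import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max

# Seed a CPU generator so the same prompt reproduces the same image.
seed = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed)

image = pipe(
    prompt="a watercolor sketch of a lighthouse at dusk",  # placeholder prompt
    width=512,                # matches the new slider default
    height=512,               # matches the new slider default
    num_inference_steps=28,   # assumed; the steps slider default is not shown in this diff
    generator=generator,
    guidance_scale=5.0,       # matches the new slider default
).images[0]
image.save("output.png")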