doevent committed on
Commit
95de845
·
verified ·
1 Parent(s): 8d6e58c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -9
app.py CHANGED
@@ -9,13 +9,7 @@ from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5Tokenize
9
  dtype = torch.bfloat16
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
- model_id = "black-forest-labs/FLUX.1-dev"
13
- adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
14
-
15
- pipe = FluxPipeline.from_pretrained(model_id, torch_dtype=dtype).to(device)
16
-
17
- pipe.load_lora_weights(adapter_id)
18
- pipe.fuse_lora()
19
 
20
  MAX_SEED = np.iinfo(np.int32).max
21
  MAX_IMAGE_SIZE = 2048
@@ -32,7 +26,7 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
32
  height = height,
33
  num_inference_steps = num_inference_steps,
34
  generator = generator,
35
- guidance_scale=guidance_scale,
36
  ).images[0]
37
  return image, seed
38
 
@@ -132,4 +126,4 @@ Merge by [Sayak Paul](https://huggingface.co/sayakpaul) of 2 of the 12B param re
132
  outputs = [result, seed]
133
  )
134
 
135
- demo.queue(default_concurrency_limit=10).launch(show_error=True)
 
9
  dtype = torch.bfloat16
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
+ pipe = FluxPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=torch.bfloat16).to(device)
 
 
 
 
 
 
13
 
14
  MAX_SEED = np.iinfo(np.int32).max
15
  MAX_IMAGE_SIZE = 2048
 
26
  height = height,
27
  num_inference_steps = num_inference_steps,
28
  generator = generator,
29
+ guidance_scale=guidance_scale
30
  ).images[0]
31
  return image, seed
32
 
 
126
  outputs = [result, seed]
127
  )
128
 
129
+ demo.queue(default_concurrency_limit=10).launch(show_error=True)