seawolf2357 committed (verified)
Commit ff50c7b · 1 Parent(s): 52428d9

Update app.py

Files changed (1):
  1. app.py +15 -12
app.py CHANGED
@@ -54,22 +54,26 @@ model = model.cuda()
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
 # FLUX model setup
-dtype = torch.float16  # use float16 instead of bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"  # load on CPU initially
+dtype = torch.float32  # use float32
 
-pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype)
-pipe = pipe.to(device)
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, use_fast=False)
 
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 2048
+# optimize memory usage
+if torch.cuda.is_available():
+    pipe.enable_sequential_cpu_offload()
+    pipe.enable_attention_slicing(1)
 
 @spaces.GPU(duration=300)
 def infer_t2i(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator(device=device).manual_seed(seed)
-    with torch.cuda.amp.autocast():
+    generator = torch.Generator().manual_seed(seed)
+
+    # use GPU for inference (when available)
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    with torch.no_grad():
         image = pipe(
             prompt=prompt,
             width=width,
@@ -78,11 +82,10 @@ def infer_t2i(prompt, seed=42, randomize_seed=False, width=1024, height=1024, gu
             generator=generator,
             guidance_scale=guidance_scale
         ).images[0]
+
    torch.cuda.empty_cache()
    return image, seed
-
-
-
+
 @spaces.GPU(duration=300)
 def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123, video_length=2):
     # detect Korean input and translate it
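
For context, a minimal self-contained sketch of the pattern this revision moves to: load the pipeline on CPU in float32, enable sequential CPU offload and attention slicing when CUDA is available, and run seeded inference under torch.no_grad(). Model ID, parameter names, and defaults mirror the diff; the generate wrapper name is hypothetical, and the @spaces.GPU / Gradio / translator wiring plus the use_fast flag from the commit are omitted here.

# Sketch only: assumes diffusers + accelerate are installed and the
# FLUX.1-dev checkpoint is accessible; not the full app.py.
import random

import numpy as np
import torch
from diffusers import DiffusionPipeline

MAX_SEED = np.iinfo(np.int32).max

# Load weights on CPU first; offloading moves submodules to the GPU on demand.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.float32
)
if torch.cuda.is_available():
    pipe.enable_sequential_cpu_offload()  # stream submodules to the GPU as needed
    pipe.enable_attention_slicing(1)      # compute attention in small slices

def generate(prompt, seed=42, randomize_seed=False, width=1024, height=1024,
             guidance_scale=5.0, num_inference_steps=28):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # CPU generator: works with offloaded pipelines, unlike a CUDA-pinned one.
    generator = torch.Generator().manual_seed(seed)
    with torch.no_grad():  # plain fp32 inference, no autocast
        image = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
        ).images[0]
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # release cached VRAM between requests
    return image, seed

Compared with the previous float16 + autocast path, this trades throughput for a much smaller peak VRAM footprint, since sequential offload keeps only the currently executing submodule on the GPU.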