i0switch committed on
Commit cd4d606 · verified · 1 Parent(s): e1036a6

Update app.py

Files changed (1)
  1. app.py +43 -12
app.py CHANGED
@@ -97,6 +97,8 @@ def initialize_pipelines():
     if upsampler is None and not UPSCALE_OK:  # do not retry once it has failed
         print("Checking for Upscaler...")
         try:
+            # Add the cv2 import here
+            import cv2
             from basicsr.archs.rrdb_arch import RRDBNet
             from realesrgan import RealESRGAN
             rrdb = RRDBNet(3, 3, 64, 23, 32, scale=8)
@@ -115,10 +117,8 @@ def initialize_pipelines():
 BASE_PROMPT = ("(masterpiece:1.2), best quality, ultra-realistic, RAW photo, 8k,\n""photo of {subject},\n""cinematic lighting, golden hour, rim light, shallow depth of field,\n""textured skin, high detail, shot on Canon EOS R5, 85 mm f/1.4, ISO 200,\n""<lora:ip-adapter-faceid-plusv2_sd15_lora:0.65>, (face),\n""(aesthetic:1.1), (cinematic:0.8)")
 NEG_PROMPT = ("ng_deepnegative_v1_75t, CyberRealistic_Negative-neg, UnrealisticDream, ""(worst quality:2), (low quality:1.8), lowres, (jpeg artifacts:1.2), ""painting, sketch, illustration, drawing, cartoon, anime, cgi, render, 3d, ""monochrome, grayscale, text, logo, watermark, signature, username, ""(MajicNegative_V2:0.8), bad hands, extra digits, fused fingers, malformed limbs, ""missing arms, missing legs, (badhandv4:0.7), BadNegAnatomyV1-neg, skin blemishes, acnes, age spot, glans")
 
-
-# Main body executed on ZeroGPU; duration is set to 60 seconds.
-@spaces.GPU(duration=60)
-def _generate_core(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress=gr.Progress(track_tqdm=True)):
+# [Change 1] Internal image-generation function; the @spaces.GPU decorator is removed
+def _generate_internal(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress=gr.Progress(track_tqdm=True)):
     # Initialize the pipelines on the first call
     initialize_pipelines()
 
@@ -131,18 +131,31 @@ def _generate_core(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps,
     result = pipe(prompt=prompt, negative_prompt=neg, ip_adapter_image=face_img, image=face_img, controlnet_conditioning_scale=0.9, num_inference_steps=int(steps) + 5, guidance_scale=cfg, width=int(w), height=int(h)).images[0]
 
     if upscale and UPSCALE_OK:
+        # Add the cv2 import here as well
+        import cv2
         progress(0.8, desc="Upscaling...")
         up, _ = upsampler.enhance(cv2.cvtColor(np.array(result), cv2.COLOR_RGB2BGR), outscale=up_factor)
         result = Image.fromarray(cv2.cvtColor(up, cv2.COLOR_BGR2RGB))
 
     return result
 
-# Wrapper function called from the Gradio UI
+# [Change 2] Define a new wrapper function that carries the @spaces.GPU decorator
+@spaces.GPU(duration=60)
+def generate_gpu_wrapper(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress=gr.Progress(track_tqdm=True)):
+    """
+    Wrapper function that requests a GPU from the Hugging Face Spaces platform.
+    The actual work is delegated to _generate_internal.
+    """
+    return _generate_internal(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress)
+
+
+# [Change 3] The Gradio UI now calls the new wrapper function
 def generate_ui(face_np, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress=gr.Progress(track_tqdm=True)):
     if face_np is None: raise gr.Error("顔画像をアップロードしてください。")
     # Convert the NumPy array to a Pillow image
     face_img = Image.fromarray(face_np)
-    return _generate_core(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress)
+    # Call generate_gpu_wrapper instead of _generate_core
+    return generate_gpu_wrapper(face_img, subject, add_prompt, add_neg, cfg, ip_scale, steps, w, h, upscale, up_factor, progress)
 
 
 # 5. Gradio UI Definition
@@ -166,7 +179,6 @@ with gr.Blocks() as demo:
     with gr.Column():
         out_img = gr.Image(label="結果")
 
-    # .queue() is required as a standard Gradio feature
     demo.queue()
 
     btn.click(
@@ -178,7 +190,7 @@ with gr.Blocks() as demo:
 # 6. FastAPI Mounting
 app = FastAPI()
 
-# Define the FastAPI endpoint; it also calls _generate_core internally
+# [Change 4] The FastAPI endpoint also calls the new wrapper function
 @app.post("/api/predict")
 async def predict_endpoint(
     face_image: UploadFile = File(...),
@@ -197,8 +209,8 @@ async def predict_endpoint(
     contents = await face_image.read()
     pil_image = Image.open(io.BytesIO(contents))
 
-    # Calls coming through FastAPI also use the same core function
-    result_pil_image = _generate_core(
+    # Call generate_gpu_wrapper instead of _generate_core
+    result_pil_image = generate_gpu_wrapper(
         pil_image, subject, add_prompt, add_neg, cfg, ip_scale,
         steps, w, h, upscale, up_factor
     )
@@ -217,5 +229,24 @@ app = gr.mount_gradio_app(app, demo, path="/")
 
 print("Application startup script finished. Waiting for requests.")
 if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=7860, workers=1, log_level="info")
+    import os, time, socket, uvicorn
+
+    def port_is_free(port: int) -> bool:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            return s.connect_ex(("0.0.0.0", port)) != 0
+
+    port = int(os.getenv("PORT", 7860))
+    # Timeout shortened for local testing
+    # timeout_sec = 30
+    # poll_interval = 2
+    #
+    # t0 = time.time()
+    # while not port_is_free(port):
+    #     waited = time.time() - t0
+    #     if waited >= timeout_sec:
+    #         raise RuntimeError(f"Port {port} is still busy after {timeout_sec}s")
+    #     print(f"⚠️ Port {port} busy, retrying in {poll_interval}s …")
+    #     time.sleep(poll_interval)
+
+    # Port conflicts are unlikely in the Hugging Face Spaces environment, so the port-check logic is simplified/disabled
+    uvicorn.run(app, host="0.0.0.0", port=port, workers=1, log_level="info")
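
The core of this commit is the decorator placement: the heavy generation code loses @spaces.GPU and only a thin wrapper requests the GPU. A reduced sketch of that pattern, independent of app.py, is shown below; spaces.GPU is the real ZeroGPU decorator from the Hugging Face `spaces` package, while the function names and bodies are illustrative stand-ins, not the ones in this repository.

# Minimal sketch of the decorator-on-a-wrapper pattern used in this commit.
# Assumes a ZeroGPU Space where the `spaces` package is available; `_heavy_work`
# and `heavy_work_gpu` are hypothetical names, not app.py's.
import spaces

def _heavy_work(prompt: str) -> str:
    # Undecorated worker: importable and testable without holding a GPU grant,
    # and reachable from several entry points (UI, API) through one wrapper.
    return f"result for {prompt}"

@spaces.GPU(duration=60)  # GPU is allocated only while this wrapper executes
def heavy_work_gpu(prompt: str) -> str:
    return _heavy_work(prompt)

if __name__ == "__main__":
    print(heavy_work_gpu("a test prompt"))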
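For quick manual testing of the /api/predict endpoint touched above, a client sketch follows. Only the parameter names and the face_image upload are taken from the diff; treating the remaining parameters as plain form fields, the example values, and the assumption that the response body is the generated image bytes are all guesses to adjust against the actual endpoint definition.

# Hypothetical client for POST /api/predict (parameter names from the diff;
# form-field handling and response format are assumptions).
import requests

url = "http://localhost:7860/api/predict"  # local run; replace with the Space URL

with open("face.jpg", "rb") as f:
    resp = requests.post(
        url,
        files={"face_image": ("face.jpg", f, "image/jpeg")},
        data={
            "subject": "a portrait photo of a person",
            "add_prompt": "",
            "add_neg": "",
            "cfg": 7.0,
            "ip_scale": 0.65,
            "steps": 30,
            "w": 512,
            "h": 768,
            "upscale": "false",
            "up_factor": 2,
        },
        timeout=600,
    )

resp.raise_for_status()
with open("result.png", "wb") as out:  # assumes the image comes back as raw bytes
    out.write(resp.content)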
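The new __main__ block keeps a port_is_free helper even though the polling loop stays commented out. A small standalone check of its connect_ex semantics (same definition as in the diff, exercised here outside app.py on a Linux-style host) looks like this:

# Standalone check of the connect_ex-based port probe from the commit.
import socket

def port_is_free(port: int) -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("0.0.0.0", port)) != 0  # non-zero => nothing accepted the connection

# Occupy an OS-chosen port, then probe it before and after releasing it.
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(("0.0.0.0", 0))
srv.listen(1)
busy_port = srv.getsockname()[1]

print(port_is_free(busy_port))  # False while the listener is alive
srv.close()
print(port_is_free(busy_port))  # True once the listener is gone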