import gradio as gr
import requests
import base64
from PIL import Image
from io import BytesIO
import os
# Hugging Face Hub API key
HF_API_KEY = os.getenv("HF_API_KEY")  # Read the API key from an environment variable

API_URL_PRIOR = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-Redux-dev"
API_URL_FLUX = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"

headers = {
    "Authorization": f"Bearer {HF_API_KEY}"
}
def call_hf_api(api_url, payload):
    # POST a JSON payload to the Inference API and return the parsed JSON response
    response = requests.post(api_url, headers=headers, json=payload)
    if response.status_code != 200:
        raise Exception(f"Request failed with status {response.status_code}, {response.text}")
    return response.json()
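
# Note: for image inputs, the Inference API docs also show sending the raw file
# bytes as the request body instead of JSON. A minimal sketch of that variant,
# assuming the endpoint accepts raw bytes (the helper name is illustrative only):
def call_hf_api_with_bytes(api_url, image_bytes):
    # Send the raw image bytes directly; the response object is returned as-is
    # so the caller can decide how to interpret the body.
    response = requests.post(api_url, headers=headers, data=image_bytes)
    if response.status_code != 200:
        raise Exception(f"Request failed with status {response.status_code}, {response.text}")
    return response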

def process_image_with_api(image_path):
    # Load the input image
    with open(image_path, "rb") as f:
        image_bytes = f.read()

    # Preprocess with the Prior Redux model.
    # Raw bytes are not JSON-serializable, so base64-encode them first
    # (assumes the endpoint accepts a base64 string under "inputs").
    prior_payload = {"inputs": base64.b64encode(image_bytes).decode("utf-8")}
    prior_response = call_hf_api(API_URL_PRIOR, prior_payload)

    # Generate the image with the FLUX model
    flux_payload = {
        "inputs": prior_response,  # Pass the Prior Redux output to the FLUX model
        "parameters": {
            "guidance_scale": 2.5,
            "num_inference_steps": 50,
            "seed": 0,  # Seed for reproducibility
        }
    }
    flux_response = call_hf_api(API_URL_FLUX, flux_payload)

    # Retrieve the generated image
    # (assumes the endpoint returns JSON containing a URL to the result)
    generated_image_url = flux_response.get("generated_image_url")
    if not generated_image_url:
        raise Exception("Generated image URL not found in the response.")

    # Download the image from the URL
    response = requests.get(generated_image_url)
    generated_image = Image.open(BytesIO(response.content))
    return generated_image
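
# Example usage outside the UI (hypothetical local file path):
#   result = process_image_with_api("input.png")
#   result.save("output.png")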

# Build the Gradio interface
def infer(image):
    result_image = process_image_with_api(image)
    return result_image

with gr.Blocks() as demo:
    gr.Markdown("# FLUX Image Generation App (via Hugging Face API)")
    with gr.Row():
        input_image = gr.Image(type="filepath", label="Input Image")
        output_image = gr.Image(type="pil", label="Generated Image")
    submit_button = gr.Button("Generate")
    submit_button.click(fn=infer, inputs=[input_image], outputs=[output_image])

demo.launch()
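
# To run locally: set the HF_API_KEY environment variable and start the app with
# `python app.py`; by default Gradio serves the UI at http://127.0.0.1:7860.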