File size: 2,327 Bytes
395720c
997d948
395720c
997d948
6a1f04e
395720c
 
997d948
 
 
395720c
997d948
 
 
395720c
997d948
 
 
 
 
395720c
997d948
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
395720c
 
 
997d948
395720c
 
 
997d948
395720c
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import base64
import os
from io import BytesIO

import gradio as gr
import requests
from PIL import Image

# Hugging Face Hub API configuration.
HF_API_KEY = os.getenv("HF_API_KEY")  # read the API key from the environment; None if unset
# Inference API endpoints for the two-stage pipeline: the Redux "prior"
# model pre-processes the input image, and FLUX.1-dev does the generation.
API_URL_PRIOR = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-Redux-dev"
API_URL_FLUX = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"

# Shared Authorization header attached to every Inference API request.
headers = {
    "Authorization": f"Bearer {HF_API_KEY}"
}

def call_hf_api(api_url, payload, timeout=60):
    """POST *payload* as JSON to *api_url* and return the decoded response.

    Args:
        api_url: Full URL of a Hugging Face Inference API endpoint.
        payload: JSON-serializable request body.
        timeout: Seconds to wait for the server. The original call had no
            timeout, so a stalled endpoint would hang the app indefinitely;
            the default keeps existing call sites working unchanged.

    Returns:
        The JSON-decoded response body.

    Raises:
        Exception: If the API responds with a non-200 status code.
    """
    response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        raise Exception(f"Request failed with status {response.status_code}, {response.text}")
    return response.json()

def process_image_with_api(image_path):
    """Generate an image from *image_path* via the Prior Redux + FLUX pipeline.

    The input image is first sent to the FLUX.1-Redux-dev ("prior") endpoint;
    its output is then forwarded to the FLUX.1-dev endpoint for generation.

    Args:
        image_path: Filesystem path to the input image.

    Returns:
        PIL.Image.Image: The generated image downloaded from the API.

    Raises:
        Exception: If either API call fails or the response contains no
            generated image URL.
    """
    # Load the raw bytes of the input image.
    with open(image_path, "rb") as f:
        image_bytes = f.read()

    # BUG FIX: raw bytes are not JSON-serializable, so the original
    # `{"inputs": image_bytes}` payload would raise a TypeError inside
    # requests before the request was ever sent. Base64-encode the image
    # so it can travel in a JSON body.
    encoded_image = base64.b64encode(image_bytes).decode("utf-8")

    # Pre-process with the Prior Redux model.
    prior_payload = {"inputs": encoded_image}
    prior_response = call_hf_api(API_URL_PRIOR, prior_payload)

    # Generate the final image with FLUX, conditioned on the prior output.
    flux_payload = {
        "inputs": prior_response,
        "parameters": {
            "guidance_scale": 2.5,
            "num_inference_steps": 50,
            "seed": 0,  # fixed seed for reproducibility
        },
    }
    flux_response = call_hf_api(API_URL_FLUX, flux_payload)

    # NOTE(review): assumes the response JSON carries a
    # "generated_image_url" key — confirm against the actual HF Inference
    # API contract, which for image models often returns raw image bytes.
    generated_image_url = flux_response.get("generated_image_url")
    if not generated_image_url:
        raise Exception("Generated image URL not found in the response.")

    # Download the generated image and decode it with PIL.
    # A timeout prevents a stalled download from hanging the app.
    response = requests.get(generated_image_url, timeout=60)
    generated_image = Image.open(BytesIO(response.content))

    return generated_image

# Gradio callback: bridges the UI to the API pipeline.
def infer(image):
    """Forward the uploaded image's filepath to the FLUX API pipeline."""
    return process_image_with_api(image)

# Build the Gradio UI: an input image, an output image, and a "Generate"
# button that triggers the inference pipeline.
with gr.Blocks() as demo:
    gr.Markdown("# FLUX Image Generation App (via Hugging Face API)")
    
    with gr.Row():
        # type="filepath" matches process_image_with_api, which open()s a
        # local path; the output component accepts the PIL image it returns.
        input_image = gr.Image(type="filepath", label="Input Image")
        output_image = gr.Image(type="pil", label="Generated Image")

    submit_button = gr.Button("Generate")

    # Wire the button click to the inference callback.
    submit_button.click(fn=infer, inputs=[input_image], outputs=[output_image])

demo.launch()