import gradio as gr
import spaces
import os
import io
from PIL import Image
import base64
from scripts.process_utils import initialize, process_image_as_base64, image_to_base64
from scripts.anime import init_model
from datetime import datetime
from pytz import timezone
from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialization
initialize(_use_local=False, use_gpu=True, use_dotenv=True)

@spaces.GPU
def process_image(input_image, mode, weight1=None, weight2=None):
    tokyo_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S")  # timestamp in Japan (Tokyo) time
    print(f"[{tokyo_time}] Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")

    # Filename for the feedback upload
    tokyo_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y%m%d_%H%M%S")
    filename = f"{tokyo_time}_mode={mode}_weight1={weight1}_weight2={weight2}.png"

    # Existing image-processing logic
    if mode == "original":
        sotai_image, sketch_image = process_image_as_base64(input_image, mode, None, None)
    elif mode == "refine":
        sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
    else:
        raise ValueError(f"Unknown mode: {mode}")
    return sotai_image, sketch_image, None, filename

def mix_images(sotai_image_data, sketch_image_data, opacity1, opacity2):
    # Decode the base64-encoded PNGs and composite them with per-layer opacity.
    sotai_image = Image.open(io.BytesIO(base64.b64decode(sotai_image_data))).convert('RGBA')
    sketch_image = Image.open(io.BytesIO(base64.b64decode(sketch_image_data))).convert('RGBA')

    if sotai_image.size != sketch_image.size:
        sketch_image = sketch_image.resize(sotai_image.size, Image.Resampling.LANCZOS)

    mixed_image = Image.new('RGBA', sotai_image.size, (255, 255, 255, 255))
    sotai_alpha = sotai_image.getchannel('A').point(lambda x: int(x * opacity1))
    sketch_alpha = sketch_image.getchannel('A').point(lambda x: int(x * opacity2))
    mixed_image.paste(sketch_image, (0, 0), mask=sketch_alpha)
    mixed_image.paste(sotai_image, (0, 0), mask=sotai_alpha)
    return mixed_image

def send_mixed_feedback(sotai_image_data, sketch_image_data, filename):
    mixed_image = mix_images(sotai_image_data, sketch_image_data, 0.5, 0.5)
    return send_feedback(mixed_image, filename)
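
# --- Hedged usage sketch (not wired into the app) -----------------------------
# A minimal helper showing how mix_images is meant to be called: it expects two
# base64-encoded PNG strings (as produced by process_image_as_base64) plus one
# opacity per layer. The helper name, the file paths, and the 0.5/0.5 opacities
# are illustrative assumptions, not part of the original app.
def _preview_mix_from_files(sotai_path, sketch_path, out_path="preview.png"):
    with open(sotai_path, "rb") as f:
        sotai_b64 = base64.b64encode(f.read()).decode("utf-8")
    with open(sketch_path, "rb") as f:
        sketch_b64 = base64.b64encode(f.read()).decode("utf-8")
    mix_images(sotai_b64, sketch_b64, 0.5, 0.5).save(out_path)
# -------------------------------------------------------------------------------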

with gr.Blocks() as demo:
    form_visible_flag = gr.Textbox(value="false", elem_id="form_flag", visible=False)

    # Title
    gr.HTML("<h1>Image2Body demo</h1>")

    # Description with translations and additional notes
    gr.HTML("""
        <p>Upload an image and select processing options to generate body and sketch images.</p>
        <p>まだstandingタグのついた女性キャラクターの1000枚の画像しか学習していないため、他のポーズは上手くできないことをご了承ください。</p>
        <p>さらなる情報は@Yeq6Xまでお問い合わせください。</p>
        <p>Note: Currently, the model has been trained on only 1000 images of female characters with the 'standing' tag, so other poses may not be processed accurately.</p>
        <p>For more information, please contact @Yeq6X.</p>
        <p>注意：目前模型仅使用带有“standing”标签的1000张女性角色图像进行训练,因此其他姿势可能无法准确处理。</p>
        <p>如需更多信息,请联系@Yeq6X</p>
    """)

    # Visit-count selection
    with gr.Column(visible=False) as visit_section:
        # Language selection section
        with gr.Row():
            language_choice = gr.Radio(
                choices=["en", "ja", "zh"],
                label="Select Language / 言語を選択 / 选择语言",
                value="en"
            )
        localized = localize("en")
        welcome_message = gr.HTML(localized["welcome_message"])
        visit_choice = gr.Radio(choices=localized["visit_choices"], label="")

    # Survey section for first-time visitors
    with gr.Column(visible=False) as survey_section:
        # Form section
        form_section = gr.HTML(localize("en")["form_html"])

    # Proceed-button section for returning visitors
    with gr.Column(visible=False) as proceed_section:

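        # Note (shape inferred from how localize() is used in this file; the actual
        # implementation lives in scripts/survey.py): localize(lang) is treated as
        # returning a dict with at least these keys:
        #   "welcome_message"   - HTML for the welcome banner
        #   "visit_choices"     - choices for the visit-count radio
        #   "returning_message" - HTML shown to returning visitors
        #   "proceed_button"    - label for the proceed button
        #   "form_html"         - HTML for the first-visit survey form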
        # gr.HTML("再訪ありがとうございます!")
        # proceed_button = gr.Button("進む")
        proceed_message = gr.HTML(localize("en")["returning_message"])
        proceed_button = gr.Button(localize("en")["proceed_button"], variant="primary")

    # Refresh the localized widgets when the language selection changes
    def update_language(language):
        localized = localize(language)
        return (
            gr.update(value=localized["welcome_message"]),
            gr.update(choices=localized["visit_choices"]),
            gr.update(value=localized["returning_message"]),
            gr.update(value=localized["proceed_button"]),
            gr.update(value=localized["form_html"])
        )

    language_choice.change(
        update_language,
        inputs=[language_choice],
        outputs=[welcome_message, visit_choice, proceed_message, proceed_button, form_section]
    )

    # Switch screens when the survey form is submitted
    def handle_submit():
        return gr.update(visible=False), gr.update(visible=True)

    submit_flag = gr.Textbox(visible=False, value="false")
    submit_flag.change(
        handle_submit,
        inputs=[],
        outputs=[form_section]
    )

    # Main screen section
    with gr.Column(visible=True) as main_section:
        # interface
        submit = None
        with gr.Row():
            with gr.Column() as input_col:
                input_image = gr.Image(type="pil", label="Input Image", height=512)
                with gr.Tab("original"):
                    original_mode = gr.Text("original", label="Mode", visible=False)
                    original_submit = gr.Button("Submit", variant="primary")
                with gr.Tab("refine"):
                    refine_input = [
                        gr.Text("refine", label="Mode", visible=False),
                        gr.Slider(0, 2, value=0.6, step=0.05, label="Weight 1 (Sketch)"),
                        gr.Slider(0, 1, value=0.05, step=0.025, label="Weight 2 (Body)")
                    ]
                    refine_submit = gr.Button("Submit", variant="primary")
                gr.Examples(
                    examples=[f"images/sample{i}.png" for i in [1, 2, 4, 5, 6, 7, 10, 16, 18, 19]],
                    inputs=[input_image]
                )
            with gr.Column() as output_col:
                sotai_image_data = gr.Text(label="Sotai Image data", visible=False)
                sketch_image_data = gr.Text(label="Sketch Image data", visible=False)
                mixed_image = gr.Image(label="Output Image", elem_id="output_image")
                opacity_slider1 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sotai)")
                opacity_slider2 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sketch)")
                send_filename = gr.Textbox(label="Feedback", visible=False)

                gr.HTML("<p>Send Feedback Image/画像を送信</p>")
                gr.HTML("<p>Images are used only for developer review and will not be shared.</p>")
                gr.HTML("<p>画像は開発者が確認するためだけに使用され、公開されません。</p>")
                send_feedback_button = gr.Button("Contribute as Feedback to Developer/開発者へのフィードバックとして協力する")
                feed_back_result = gr.Textbox(label="Feedback Result")

    original_submit.click(
        process_image,
        inputs=[input_image, original_mode],
        outputs=[sotai_image_data, sketch_image_data, mixed_image, send_filename]
    )
    refine_submit.click(
        process_image,
        inputs=[input_image, refine_input[0], refine_input[1], refine_input[2]],
        outputs=[sotai_image_data, sketch_image_data, mixed_image, send_filename]
    )

    # Re-composite the preview whenever the image data or either opacity changes
    sotai_image_data.change(
        mix_images,
        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
        outputs=mixed_image
    )
    opacity_slider1.change(
        mix_images,
        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
        outputs=mixed_image
    )
    opacity_slider2.change(
        mix_images,
        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
        outputs=mixed_image
    )

    # Switch screens when the form-visibility flag changes
    form_visible_flag.change(
        handle_form_submission,
        inputs=[form_visible_flag],
        outputs=[survey_section, main_section]
    )

    # Switch sections according to the selected visit choice
    visit_choice.change(
        handle_visit_choice,
        inputs=[visit_choice, language_choice],
        outputs=[visit_section, survey_section, proceed_section]
    )

    # Screen transition when the proceed button is pressed
    proceed_button.click(
        handle_proceed,
        inputs=[],
        outputs=[proceed_section, main_section]
    )

    send_feedback_button.click(
        send_mixed_feedback,
        inputs=[sotai_image_data, sketch_image_data, send_filename],
        outputs=[feed_back_result],
    )

    # Load the custom JavaScript
    demo.load(js=script)

demo.launch()