# NOTE(review): the original formatting of this file was lost (the whole
# script was collapsed onto a few physical lines).  The code below is the
# same program re-indented.  Several components referenced in the event
# wiring (input_image, original_mode, refine_input, opacity_slider1/2,
# sotai_image_data, sketch_image_data, mixed_image, send_filename,
# main_section, proceed_button, original_submit, refine_submit) are NOT
# defined in the visible chunk -- presumably a UI section was dropped when
# the formatting was lost.  TODO: restore that section from version control.
import base64
import io
import os
from datetime import datetime

import gradio as gr
import spaces
import torch
from PIL import Image
from pytz import timezone

from scripts.anime import init_model
from scripts.process_utils import initialize, image_to_base64, process_image_as_base64
from scripts.survey import (
    generate_image,
    handle_form_submission,
    handle_proceed,
    handle_visit_choice,
    localize,
    script,
    send_feedback,
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialize the processing pipeline (remote weights, GPU, .env configuration).
initialize(_use_local=False, use_gpu=True, use_dotenv=True)


@spaces.GPU
def process_image(input_image, mode, weight1=None, weight2=None):
    """Run the body ("sotai") / sketch extraction on an uploaded image.

    Args:
        input_image: Image uploaded through the Gradio UI.
        mode: Either "original" or "refine".
        weight1, weight2: Blend weights, forwarded only in "refine" mode.

    Returns:
        Tuple ``(sotai_image_b64, sketch_image_b64, None, feedback_filename)``;
        the ``None`` slot clears the mixed-image preview component.

    Raises:
        ValueError: If *mode* is not a supported value.  (Previously an
            unknown mode fell through both branches and crashed with
            ``UnboundLocalError`` on the return statement.)
    """
    log_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{log_time}] Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")

    # File name under which the result may later be submitted as feedback.
    file_stamp = datetime.now(timezone('Asia/Tokyo')).strftime("%Y%m%d_%H%M%S")
    filename = f"{file_stamp}_mode={mode}_weight1={weight1}_weight2={weight2}.png"

    if mode == "original":
        sotai_image, sketch_image = process_image_as_base64(input_image, mode, None, None)
    elif mode == "refine":
        sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
    else:
        raise ValueError(f"Unsupported mode: {mode!r}")
    return sotai_image, sketch_image, None, filename


def mix_images(sotai_image_data, sketch_image_data, opacity1, opacity2):
    """Alpha-blend the base64-encoded sotai (body) and sketch images.

    Args:
        sotai_image_data: Base64-encoded image of the body layer.
        sketch_image_data: Base64-encoded image of the sketch layer.
        opacity1: Opacity factor (0-1) applied to the sotai layer's alpha.
        opacity2: Opacity factor (0-1) applied to the sketch layer's alpha.

    Returns:
        A PIL RGBA image: white background, sketch pasted first, sotai on top.
    """
    sotai_image = Image.open(io.BytesIO(base64.b64decode(sotai_image_data))).convert('RGBA')
    sketch_image = Image.open(io.BytesIO(base64.b64decode(sketch_image_data))).convert('RGBA')

    # The two layers should already match, but guard against size drift.
    if sotai_image.size != sketch_image.size:
        sketch_image = sketch_image.resize(sotai_image.size, Image.Resampling.LANCZOS)

    mixed_image = Image.new('RGBA', sotai_image.size, (255, 255, 255, 255))
    sotai_alpha = sotai_image.getchannel('A').point(lambda x: int(x * opacity1))
    sketch_alpha = sketch_image.getchannel('A').point(lambda x: int(x * opacity2))
    mixed_image.paste(sketch_image, (0, 0), mask=sketch_alpha)
    mixed_image.paste(sotai_image, (0, 0), mask=sotai_alpha)
    return mixed_image


def send_mixed_feedback(sotai_image_data, sketch_image_data, filename):
    """Blend both layers at 50% opacity and submit the result as feedback."""
    mixed_image = mix_images(sotai_image_data, sketch_image_data, 0.5, 0.5)
    return send_feedback(mixed_image, filename)


with gr.Blocks() as demo:
    # Hidden flag flipped from client-side JavaScript when the survey form
    # is submitted; its .change handler swaps the visible sections.
    form_visible_flag = gr.Textbox(value="false", elem_id="form_flag", visible=False)

    # Title / usage notes (English, Japanese, Chinese).
    # NOTE(review): the surrounding HTML tags appear to have been stripped
    # along with the file's formatting -- restore them from version control.
    gr.HTML("""
Upload an image and select processing options to generate body and sketch images.
まだstandingタグのついた女性キャラクターの1000枚の画像しか学習していないため、他のポーズは上手くできないことをご了承ください。
さらなる情報は@Yeq6Xまでお問い合わせください。
Note: Currently, the model has been trained on only 1000 images of female characters with the 'standing' tag, so other poses may not be processed accurately.
For more information, please contact @Yeq6X.
注意:目前模型仅使用带有“standing”标签的1000张女性角色图像进行训练,因此其他姿势可能无法准确处理。
如需更多信息,请联系@Yeq6X。
""")

    # Visit-count selection section.
    with gr.Column(visible=False) as visit_section:
        # Language selection.
        with gr.Row():
            language_choice = gr.Radio(
                choices=["en", "ja", "zh"],
                label="Select Language / 言語を選択 / 选择语言",
                value="en",
            )
        localized = localize("en")
        welcome_message = gr.HTML(localized["welcome_message"])
        visit_choice = gr.Radio(choices=localized["visit_choices"], label="")

    # First-visit survey section.
    with gr.Column(visible=False) as survey_section:
        form_section = gr.HTML(localize("en")["form_html"])

    # "Proceed" section shown on repeat visits.
    with gr.Column(visible=False) as proceed_section:
        # gr.HTML("Images are used only for developer review and will not be shared.")
        gr.HTML("画像は開発者が確認するためだけに使用され、公開されません。")
        # NOTE(review): textual order places the feedback widgets here;
        # confirm against the original layout.
        send_feedback_button = gr.Button("Contribute as Feedback to Developer/開発者へのフィードバックとして協力する")
        feed_back_result = gr.Textbox(label="Feedback Result")

    # --- Event wiring ------------------------------------------------------
    original_submit.click(
        process_image,
        inputs=[input_image, original_mode],
        outputs=[sotai_image_data, sketch_image_data, mixed_image, send_filename],
    )
    refine_submit.click(
        process_image,
        inputs=[input_image, refine_input[0], refine_input[1], refine_input[2]],
        outputs=[sotai_image_data, sketch_image_data, mixed_image, send_filename],
    )

    # Re-blend the preview whenever the base layer or either opacity changes
    # (was three verbatim copies of the same wiring).
    for _trigger in (sotai_image_data, opacity_slider1, opacity_slider2):
        _trigger.change(
            mix_images,
            inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
            outputs=mixed_image,
        )

    # Switch screens when the (JavaScript-driven) form flag changes.
    form_visible_flag.change(
        handle_form_submission,
        inputs=[form_visible_flag],
        outputs=[survey_section, main_section],
    )
    # Toggle sections according to the visit-count answer.
    visit_choice.change(
        handle_visit_choice,
        inputs=[visit_choice, language_choice],
        outputs=[visit_section, survey_section, proceed_section],
    )
    # Screen transition when the proceed button is pressed.
    proceed_button.click(
        handle_proceed,
        inputs=[],
        outputs=[proceed_section, main_section],
    )
    send_feedback_button.click(
        send_mixed_feedback,
        inputs=[sotai_image_data, sketch_image_data, send_filename],
        outputs=[feed_back_result],
    )

    # Load the client-side survey JavaScript.
    demo.load(js=script)

demo.launch()