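"""Gradio demo for EasyPhoto on Alibaba Cloud PAI.

The "Training" tab uploads user photos to OSS and submits a training job
through the PAI AIGC image service; the "Inference" tab uploads a selected
template image and generates photos with the trained model.
"""
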
import base64
import glob
import os
import time

import cv2
import gradio as gr
import numpy as np
import oss2
from ai_service_python_sdk.client.api.ai_service_aigc_images_api import AIGCImagesApi
from ai_service_python_sdk.client.api.ai_service_job_api import AiServiceJobApi
from ai_service_python_sdk.client.api_client import ApiClient

# PAI AIGC service credentials, read from environment variables (Space secrets).
host = os.getenv("PAI_REC_HOST")
appId = os.getenv("PAI_REC_APP_ID")
token = os.getenv("PAI_REC_TOKEN")

# OSS credentials used to stage the uploaded photos and template images.
access_key_id = os.getenv('OSS_ACCESS_KEY_ID')
access_key_secret = os.getenv('OSS_ACCESS_KEY_SECRET')
bucket_name = os.getenv('OSS_BUCKET')
endpoint = os.getenv('OSS_ENDPOINT')
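
# Optional sanity check (added sketch): the names below mirror the os.getenv
# calls above, so a missing secret surfaces in the logs early instead of
# failing later inside an OSS or PAI call.
for _name in ('PAI_REC_HOST', 'PAI_REC_APP_ID', 'PAI_REC_TOKEN',
              'OSS_ACCESS_KEY_ID', 'OSS_ACCESS_KEY_SECRET',
              'OSS_BUCKET', 'OSS_ENDPOINT'):
    if not os.getenv(_name):
        print('Warning: environment variable %s is not set.' % _name)


# ---------------------------------------------------------------------------
# Helpers: gallery upload handling, image decoding, and OSS staging.
# ---------------------------------------------------------------------------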
def upload_file(files, current_files):
    """Append newly uploaded files to the photos already shown in the gallery."""
    file_paths = [file_d['name'] for file_d in current_files] + [file.name for file in files]
    return file_paths


def decode_image_from_base64jpeg(base64_image):
    """Decode a base64-encoded JPEG returned by the service into an RGB array."""
    image_bytes = base64.b64decode(base64_image)
    np_arr = np.frombuffer(image_bytes, np.uint8)
    image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image


def upload(image_path, number):
    """Upload one training photo to OSS and return its URL."""
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
    file_name = image_path.split('/')[-1]
    ext = file_name.split('.')[-1]
    file_name = str(number) + '.' + ext
    timestamp = str(int(time.time()))
    bucket_folder = 'aigc-data/easyphoto_demo_data/' + timestamp + '_user_lora/'
    oss_file_path = bucket_folder + file_name
    bucket.put_object_from_file(oss_file_path, image_path)
    file_url = 'https://' + bucket_name + '.' + endpoint + '/' + bucket_folder + file_name
    return file_url


def upload_template(image_path):
    """Upload a selected template image to OSS and return its URL."""
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
    file_name = image_path.split('/')[-1]
    timestamp = str(int(time.time()))
    bucket_folder = 'aigc-data/easyphoto_demo_data/' + timestamp + '_user_template/'
    oss_file_path = bucket_folder + file_name
    bucket.put_object_from_file(oss_file_path, image_path)
    file_url = 'https://' + bucket_name + '.' + endpoint + '/' + bucket_folder + file_name
    return file_url
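

# ---------------------------------------------------------------------------
# PAI AIGC service calls: submit a training job, poll its status, and run
# inference against the trained model.
# ---------------------------------------------------------------------------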
def easyphoto_train(instance_images):
    """Upload the user photos to OSS and submit a training job to the service."""
    images = []
    if instance_images is None or len(instance_images) == 0:
        output = 'Status: no image uploaded! 没有上传照片'
        return output, [], []
    for number, image in enumerate(instance_images):
        image_path = image['name']
        image_url = upload(image_path, number)
        images.append(image_url)

    client = ApiClient(host, appId, token)
    api = AIGCImagesApi(client)
    response = api.aigc_images_train(images, '', None)

    message = response.message
    model_id = response.data['model_id']
    job_id = response.data['job_id']
    if message == 'success':
        state = 'training job submitted. 提交训练任务成功'
        output = 'Status: ' + state
        print("job id: " + str(job_id))
        print("model id: " + str(model_id))
        return output, job_id, model_id
    else:
        output = 'Status: submitting training job failed! 提交训练任务失败'
        return output, [], []


def easyphoto_check(job_id):
    """Query the status of an asynchronous training job by its id."""
    client = ApiClient(host, appId, token)
    api = AiServiceJobApi(client)
    if job_id is None:
        output = 'Status: checking training status failed! No job id. 状态检查失败'
    else:
        try:
            job_id = int(str(job_id).strip())
            response = api.get_async_job_with_id(job_id)
            message = response.data['job']['message']
            output = 'Status: ' + message
        except Exception:
            output = 'Status: checking training status failed! 状态检查失败'
    return output
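

# Typical flow: easyphoto_train() returns (status, job_id, model_id); poll
# easyphoto_check(job_id) until it reports success, then pass model_id to
# easyphoto_infer() together with the selected template images.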
def easyphoto_infer(model_id, selected_template_images, additional_prompt, seed,
                    before_face_fusion_ratio, after_face_fusion_ratio,
                    first_diffusion_steps, first_denoising_strength,
                    second_diffusion_steps, second_denoising_strength,
                    crop_face_preprocess, apply_face_fusion_before, apply_face_fusion_after,
                    color_shift_middle, color_shift_last, background_restore):
    """Upload the selected templates to OSS and generate photos with the trained model."""
    image_urls = []
    if len(selected_template_images) == 0:
        output_info = 'Status: no template selected! 需要选择模板'
        return output_info, []
    # The gallery selection is stored as the string repr of a list of paths.
    selected_template_images = eval(selected_template_images)
    for image in selected_template_images:
        image_url = upload_template(image)
        image_urls.append(image_url)

    client = ApiClient(host, appId, token)
    api = AIGCImagesApi(client)

    outputs = []
    output_info = None
    if model_id is None:
        output_info = 'Status: no model id provided! 需要提供模型id'
        return output_info, []
    model_id = str(model_id).strip()
    print('model id: ' + model_id)

    for image_url in image_urls:
        try:
            params = {
                "additional_prompt": additional_prompt,
                "seed": seed,
                "before_face_fusion_ratio": before_face_fusion_ratio,
                "after_face_fusion_ratio": after_face_fusion_ratio,
                "first_diffusion_steps": first_diffusion_steps,
                "first_denoising_strength": first_denoising_strength,
                "second_diffusion_steps": second_diffusion_steps,
                "second_denoising_strength": second_denoising_strength,
                "crop_face_preprocess": crop_face_preprocess,
                "apply_face_fusion_before": apply_face_fusion_before,
                "apply_face_fusion_after": apply_face_fusion_after,
                "color_shift_middle": color_shift_middle,
                "color_shift_last": color_shift_last,
                "background_restore": background_restore
            }
            response = api.aigc_images_create(model_id, image_url, 'photog_infer_with_webui_pmml', params)
        except Exception:
            output_info = 'Status: calling eas service failed!'
            return output_info, []

        data = response.data
        message = response.message
        if message == 'success':
            image = data['image']
            image = decode_image_from_base64jpeg(image)
            outputs.append(image)
            output_info = 'Status: generating image successfully! 图像生成成功'
        else:
            output_info = 'Status: generating image failed! 图像生成失败'
            return output_info, []
    return output_info, outputs
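

# ---------------------------------------------------------------------------
# Gradio UI: a Training tab (upload photos, submit and poll the training job)
# and an Inference tab (pick a template, tune parameters, generate photos).
# ---------------------------------------------------------------------------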
with gr.Blocks() as easyphoto_demo:
    model_id = gr.Textbox(visible=False)
    with gr.TabItem('Training 训练'):
        with gr.Blocks():
            with gr.Row():
                with gr.Column():
                    instance_images = gr.Gallery().style(columns=[4], rows=[2], object_fit="contain", height="auto")
                    with gr.Row():
                        upload_button = gr.UploadButton(
                            "Upload Photos 上传照片", file_types=["image"], file_count="multiple"
                        )
                        clear_button = gr.Button("Clear Photos 清除照片")
                        clear_button.click(fn=lambda: [], inputs=None, outputs=instance_images)
                    upload_button.upload(upload_file, inputs=[upload_button, instance_images], outputs=instance_images, queue=False)
                    gr.Markdown(
                        '''
                        Training steps:
                        1. Upload 5 to 20 half-body or head-and-shoulder photos, and make sure the face does not take up too small a portion of the image.
                        2. Click the training button below to submit the training job. Training takes about 15 minutes, and you can check the job status. Please do not click the submit button repeatedly!
                        3. When training finishes, the job status shows "success"; switch to the Inference tab and generate photos from a template.
                        4. If uploading is slow or gets stuck, resize the photos, ideally to under 1.5 MB each.
                        5. Do not refresh or close the window during training or inference.
                        '''
                    )
                    job_id = gr.Textbox(visible=False)
                    with gr.Row():
                        run_button = gr.Button('Submit My Training Job 提交训练任务')
                        check_button = gr.Button('Check My Training Job Status 检查我的训练任务状态')
                    output_message = gr.Textbox(value="", label="Status 状态", interactive=False)
                    run_button.click(fn=easyphoto_train,
                                     inputs=[instance_images],
                                     outputs=[output_message, job_id, model_id])
                    check_button.click(fn=easyphoto_check,
                                       inputs=[job_id],
                                       outputs=[output_message])

    with gr.TabItem('Inference 推理'):
        templates = glob.glob(r'./*.jpg')
        preset_template = list(templates)
        with gr.Blocks() as demo:
            with gr.Row():
                with gr.Column():
                    template_gallery_list = [(i, i) for i in preset_template]
                    gallery = gr.Gallery(template_gallery_list).style(columns=[4], rows=[2], object_fit="contain", height="auto")

                    def select_function(evt: gr.SelectData):
                        return [preset_template[evt.index]]

                    selected_template_images = gr.Text(show_label=False, visible=False, placeholder="Selected")
                    gallery.select(select_function, None, selected_template_images)

                    with gr.Accordion("Advanced Options 参数设置", open=False):
                        additional_prompt = gr.Textbox(
                            label="Additional Prompt",
                            lines=3,
                            value='masterpiece, beauty',
                            interactive=True
                        )
                        seed = gr.Textbox(
                            label="Seed",
                            value=12345,
                        )
                        with gr.Row():
                            before_face_fusion_ratio = gr.Slider(
                                minimum=0.2, maximum=0.8, value=0.50,
                                step=0.05, label='Face Fusion Ratio Before'
                            )
                            after_face_fusion_ratio = gr.Slider(
                                minimum=0.2, maximum=0.8, value=0.50,
                                step=0.05, label='Face Fusion Ratio After'
                            )
                        with gr.Row():
                            first_diffusion_steps = gr.Slider(
                                minimum=15, maximum=50, value=50,
                                step=1, label='First Diffusion steps'
                            )
                            first_denoising_strength = gr.Slider(
                                minimum=0.30, maximum=0.60, value=0.45,
                                step=0.05, label='First Diffusion denoising strength'
                            )
                        with gr.Row():
                            second_diffusion_steps = gr.Slider(
                                minimum=15, maximum=50, value=20,
                                step=1, label='Second Diffusion steps'
                            )
                            second_denoising_strength = gr.Slider(
                                minimum=0.20, maximum=0.40, value=0.30,
                                step=0.05, label='Second Diffusion denoising strength'
                            )
                        with gr.Row():
                            crop_face_preprocess = gr.Checkbox(
                                label="Crop Face Preprocess",
                                value=True
                            )
                            apply_face_fusion_before = gr.Checkbox(
                                label="Apply Face Fusion Before",
                                value=True
                            )
                            apply_face_fusion_after = gr.Checkbox(
                                label="Apply Face Fusion After",
                                value=True
                            )
                        with gr.Row():
                            color_shift_middle = gr.Checkbox(
                                label="Apply color shift first",
                                value=True
                            )
                            color_shift_last = gr.Checkbox(
                                label="Apply color shift last",
                                value=True
                            )
                            background_restore = gr.Checkbox(
                                label="Background Restore",
                                value=False
                            )

                    with gr.Box():
                        gr.Markdown(
                            '''
                            Parameters:
                            1. **Face Fusion Ratio Before** is the strength of the first face fusion; the higher it is, the closer the result is to the training subject.
                            2. **Face Fusion Ratio After** is the strength of the second face fusion; the higher it is, the closer the result is to the training subject.
                            3. **Crop Face Preprocess** controls whether the image is cropped before generation, which helps with images where the face is small.
                            4. **Apply Face Fusion Before** controls whether the first face fusion is performed.
                            5. **Apply Face Fusion After** controls whether the second face fusion is performed.
                            '''
                        )

                with gr.Column():
                    gr.Markdown('Generated Results 生成结果')
                    output_images = gr.Gallery(
                        label='Output',
                        show_label=False
                    ).style(columns=[4], rows=[2], object_fit="contain", height="auto")
                    display_button = gr.Button('Start Generation 开始生成')
                    infer_progress = gr.Textbox(
                        label="Generation Progress 生成进度",
                        value="",
                        interactive=False
                    )
            display_button.click(
                fn=easyphoto_infer,
                inputs=[model_id, selected_template_images, additional_prompt, seed,
                        before_face_fusion_ratio, after_face_fusion_ratio,
                        first_diffusion_steps, first_denoising_strength,
                        second_diffusion_steps, second_denoising_strength,
                        crop_face_preprocess, apply_face_fusion_before,
                        apply_face_fusion_after, color_shift_middle,
                        color_shift_last, background_restore],
                outputs=[infer_progress, output_images]
            )

    gr.Markdown(
        """
        Reference links:
        - EasyPhoto GitHub: https://github.com/aigc-apps/sd-webui-EasyPhoto
        - Alibaba Cloud Free Tier: https://help.aliyun.com/document_detail/2567864.html
        - PAI Gallery (智码实验室): https://gallery.pai-ml.com/#/preview/deepLearning/cv/stable_diffusion_easyphoto
        """)

# Enable the request queue before launching so long-running jobs are handled properly.
easyphoto_demo.queue().launch(share=True)