Upload 5 files
- Extras/ffff_assets/core.py +189 -0
- Extras/ffff_assets/default.py +207 -0
- Extras/ffff_assets/preview.py +216 -0
- Extras/ffff_assets/source.py +129 -0
- Extras/ffff_assets/target.py +132 -0
Extras/ffff_assets/core.py
ADDED
@@ -0,0 +1,189 @@
import importlib
import os
import warnings
from types import ModuleType
from typing import Any, Dict, List, Optional

import gradio
from gradio.themes import Size

from ffff import logger, metadata, state_manager, wording
from ffff.exit_helper import hard_exit
from ffff.filesystem import resolve_relative_path
from ffff.uis import overrides
from ffff.uis.typing import Component, ComponentName

os.environ['GRADIO_ANALYTICS_ENABLED'] = '0'

warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')

gradio.processing_utils.encode_array_to_base64 = overrides.encode_array_to_base64
gradio.processing_utils.encode_pil_to_base64 = overrides.encode_pil_to_base64

UI_COMPONENTS: Dict[ComponentName, Component] = {}
UI_LAYOUT_MODULES : List[ModuleType] = []
UI_LAYOUT_METHODS =\
[
    'pre_check',
    'pre_render',
    'render',
    'listen',
    'run'
]


def load_ui_layout_module(ui_layout : str) -> Any:
    try:
        ui_layout_module = importlib.import_module('ffff.uis.layouts.' + ui_layout)
        for method_name in UI_LAYOUT_METHODS:
            if not hasattr(ui_layout_module, method_name):
                raise NotImplementedError
    except ModuleNotFoundError as exception:
        logger.error(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout), __name__)
        logger.debug(exception.msg, __name__)
        hard_exit(1)
    except NotImplementedError:
        logger.error(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout), __name__)
        hard_exit(1)
    return ui_layout_module


def get_ui_layouts_modules(ui_layouts : List[str]) -> List[ModuleType]:
    global UI_LAYOUT_MODULES

    if not UI_LAYOUT_MODULES:
        for ui_layout in ui_layouts:
            ui_layout_module = load_ui_layout_module(ui_layout)
            UI_LAYOUT_MODULES.append(ui_layout_module)
    return UI_LAYOUT_MODULES


def get_ui_component(component_name : ComponentName) -> Optional[Component]:
    if component_name in UI_COMPONENTS:
        return UI_COMPONENTS[component_name]
    return None


def get_ui_components(component_names : List[ComponentName]) -> Optional[List[Component]]:
    ui_components = []

    for component_name in component_names:
        component = get_ui_component(component_name)
        if component:
            ui_components.append(component)
    return ui_components


def register_ui_component(component_name : ComponentName, component: Component) -> None:
    UI_COMPONENTS[component_name] = component


def launch() -> None:
    ui_layouts_total = len(state_manager.get_item('ui_layouts'))
    with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version'), fill_width = True) as ui:
        for ui_layout in state_manager.get_item('ui_layouts'):
            ui_layout_module = load_ui_layout_module(ui_layout)
            if ui_layout_module.pre_render():
                if ui_layouts_total > 1:
                    with gradio.Tab(ui_layout):
                        ui_layout_module.render()
                        ui_layout_module.listen()
                else:
                    ui_layout_module.render()
                    ui_layout_module.listen()

    for ui_layout in state_manager.get_item('ui_layouts'):
        ui_layout_module = load_ui_layout_module(ui_layout)
        ui_layout_module.run(ui)


def get_theme() -> gradio.Theme:
    return gradio.themes.Base(
        primary_hue = gradio.themes.colors.red,
        secondary_hue = gradio.themes.colors.neutral,
        radius_size = Size(
            xxs = '0.375rem',
            xs = '0.375rem',
            sm = '0.375rem',
            md = '0.375rem',
            lg = '0.375rem',
            xl = '0.375rem',
            xxl = '0.375rem',
        ),
        font = gradio.themes.GoogleFont('Open Sans')
    ).set(
        background_fill_primary = '*neutral_100',
        block_background_fill = 'white',
        block_border_width = '0',
        block_label_background_fill = '*neutral_100',
        block_label_background_fill_dark = '*neutral_700',
        block_label_border_width = 'none',
        block_label_margin = '0.5rem',
        block_label_radius = '*radius_md',
        block_label_text_color = '*neutral_700',
        block_label_text_size = '*text_sm',
        block_label_text_color_dark = 'white',
        block_label_text_weight = '600',
        block_title_background_fill = '*neutral_100',
        block_title_background_fill_dark = '*neutral_700',
        block_title_padding = '*block_label_padding',
        block_title_radius = '*block_label_radius',
        block_title_text_color = '*neutral_700',
        block_title_text_size = '*text_sm',
        block_title_text_weight = '600',
        block_padding = '0.5rem',
        border_color_primary = 'transparent',
        border_color_primary_dark = 'transparent',
        button_large_padding = '2rem 0.5rem',
        button_large_text_weight = 'normal',
        button_primary_background_fill = '*primary_500',
        button_primary_text_color = 'white',
        button_secondary_background_fill = 'white',
        button_secondary_border_color = 'transparent',
        button_secondary_border_color_dark = 'transparent',
        button_secondary_border_color_hover = 'transparent',
        button_secondary_border_color_hover_dark = 'transparent',
        button_secondary_text_color = '*neutral_800',
        button_small_padding = '0.75rem',
        checkbox_background_color = '*neutral_200',
        checkbox_background_color_selected = '*primary_600',
        checkbox_background_color_selected_dark = '*primary_700',
        checkbox_border_color_focus = '*primary_500',
        checkbox_border_color_focus_dark = '*primary_600',
        checkbox_border_color_selected = '*primary_600',
        checkbox_border_color_selected_dark = '*primary_700',
        checkbox_label_background_fill = '*neutral_50',
        checkbox_label_background_fill_hover = '*neutral_50',
        checkbox_label_background_fill_selected = '*primary_500',
        checkbox_label_background_fill_selected_dark = '*primary_600',
        checkbox_label_text_color_selected = 'white',
        input_background_fill = '*neutral_50',
        shadow_drop = 'none',
        slider_color = '*primary_500',
        slider_color_dark = '*primary_600'
    )


def get_css() -> str:
    # Return an empty string so no additional CSS is loaded
    return ""


# Note: this second definition of launch() shadows the one above, so it is the variant
# that actually runs; it applies the 'ParityError/Interstellar' theme and no custom CSS.
def launch() -> None:
    ui_layouts_total = len(state_manager.get_item('ui_layouts'))
    with gradio.Blocks(theme='ParityError/Interstellar', css="") as ui:  # Apply the theme
        for ui_layout in state_manager.get_item('ui_layouts'):
            ui_layout_module = load_ui_layout_module(ui_layout)
            if ui_layout_module.pre_render():
                if ui_layouts_total > 1:
                    with gradio.Tab(ui_layout):
                        ui_layout_module.render()
                        ui_layout_module.listen()
                else:
                    ui_layout_module.render()
                    ui_layout_module.listen()

    for ui_layout in state_manager.get_item('ui_layouts'):
        ui_layout_module = load_ui_layout_module(ui_layout)
        ui_layout_module.run(ui)
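For context, the component registry above is what ties the layout modules together: a module's render() calls register_ui_component(...) under a well-known name, and other modules later fetch the same component with get_ui_component(...) to attach listeners. A minimal sketch of that flow, assuming a hypothetical component name ('example_slider') that is not part of this upload:

import gradio
from ffff.uis.core import get_ui_component, register_ui_component

def render() -> None:
    # A layout component registers itself under a well-known name ...
    example_slider = gradio.Slider(label = 'example', minimum = 0, maximum = 100)
    register_ui_component('example_slider', example_slider)

def listen() -> None:
    # ... so any other module can look it up later and wire an event to it.
    example_slider = get_ui_component('example_slider')
    if example_slider:
        example_slider.release(print, inputs = example_slider)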
Extras/ffff_assets/default.py
ADDED
@@ -0,0 +1,207 @@
import multiprocessing
import gradio as gr
import os
import requests
import hashlib

from ffff import state_manager
from ffff.uis.components import about, age_modifier_options, common_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow


def pre_check() -> bool:
    return True


def pre_render() -> bool:
    return True


def listar_archivos():
    # List the regular files in the current working directory
    archivos = [f for f in os.listdir() if os.path.isfile(f)]
    return archivos


def descargar_archivo(archivo_seleccionado):
    # Return the absolute path of the selected file so Gradio can serve it for download
    return os.path.join(os.getcwd(), archivo_seleccionado)


def descargar_de_url(url, filename):
    # Download a file from a URL; fall back to an MD5 hash when the name is too long
    local_filename = filename if filename else url.split('/')[-1]
    if len(local_filename) > 255:
        local_filename = hashlib.md5(local_filename.encode()).hexdigest()
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return local_filename


def render() -> gr.Blocks:
    with gr.Blocks() as layout:
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Accordion("Column 1", open=True):
                    about.render()
                    processors.render()
                    age_modifier_options.render()
                    expression_restorer_options.render()
                    face_debugger_options.render()
                    face_editor_options.render()
                    face_enhancer_options.render()
                    face_swapper_options.render()
                    frame_colorizer_options.render()
                    frame_enhancer_options.render()
                    lip_syncer_options.render()
                    execution.render()
                    execution_thread_count.render()
                    execution_queue_count.render()
                    memory.render()
                    temp_frame.render()
                    output_options.render()
                    script_content = """
                    function ClickConnect(){
                        console.log("Working");
                        document.querySelector("#top-toolbar > colab-connect-button").shadowRoot.querySelector("#connect").click();
                    }
                    setInterval(ClickConnect,60000)
                    """
                    script_box = gr.Textbox(value=script_content, label="Script for Colab", lines=10, interactive=False)
                    gr.Markdown("----")
                    output.render()

            with gr.Column(scale=1):
                with gr.Accordion("Column 2", open=True):
                    source.render()

                    # File listing for Source
                    archivos_source = listar_archivos()
                    archivo_seleccionado_source = gr.Dropdown(label="Select a file for Source", choices=archivos_source)
                    boton_refrescar_source = gr.Button("Refresh Source Files")
                    cargar_boton_source = gr.Button("Load File into Source")

                    def actualizar_archivos_source():
                        archivos_actualizados = listar_archivos()
                        return gr.update(choices=archivos_actualizados)  # Return an update so only the choices change

                    boton_refrescar_source.click(fn=actualizar_archivos_source, inputs=[], outputs=archivo_seleccionado_source)
                    cargar_boton_source.click(fn=source.cargar_archivo, inputs=archivo_seleccionado_source, outputs=[source.SOURCE_IMAGE, source.SOURCE_VIDEO])

                    # Button to paste an image from the clipboard into Source
                    paste_source_button = gr.Button("Paste from clipboard into Source")
                    paste_source_button.click(fn=source.handle_paste, outputs=[source.SOURCE_IMAGE, source.SOURCE_VIDEO])

                    # URL input and button to load from a URL into Source
                    url_input_source = gr.Textbox(label="Enter the file URL for Source")
                    load_button_source = gr.Button("Load from URL into Source")
                    load_button_source.click(fn=source.handle_url_input, inputs=url_input_source, outputs=[source.SOURCE_IMAGE, source.SOURCE_VIDEO])

                    gr.Markdown("----")

                    target.render()

                    # File listing for Target
                    archivos_target = listar_archivos()
                    archivo_seleccionado_target = gr.Dropdown(label="Select a file for Target", choices=archivos_target)
                    boton_refrescar_target = gr.Button("Refresh Target Files")
                    cargar_boton_target = gr.Button("Load File into Target")

                    def actualizar_archivos_target():
                        archivos_actualizados = listar_archivos()
                        return gr.update(choices=archivos_actualizados)  # Return an update so only the choices change

                    boton_refrescar_target.click(fn=actualizar_archivos_target, inputs=[], outputs=archivo_seleccionado_target)
                    cargar_boton_target.click(fn=target.cargar_archivo, inputs=archivo_seleccionado_target, outputs=[target.TARGET_IMAGE, target.TARGET_VIDEO])

                    # Button to paste an image from the clipboard into Target
                    paste_target_button = gr.Button("Paste from clipboard into Target")
                    paste_target_button.click(fn=target.handle_paste, outputs=[target.TARGET_IMAGE, target.TARGET_VIDEO])

                    # URL input and button to load from a URL into Target
                    url_input_target = gr.Textbox(label="Enter the file URL for Target")
                    load_button_target = gr.Button("Load from URL into Target")
                    load_button_target.click(fn=target.handle_url_input, inputs=url_input_target, outputs=[target.TARGET_IMAGE, target.TARGET_VIDEO])

                    terminal.render()
                    ui_workflow.render()
                    instant_runner.render()
                    job_runner.render()
                    job_manager.render()

            with gr.Column(scale=3):
                with gr.Accordion("Column 3", open=True):
                    preview.render()
                    trim_frame.render()
                    face_selector.render()
                    face_masker.render()
                    face_detector.render()
                    face_landmarker.render()
                    common_options.render()

                    gr.Markdown("----")

                    # New section to download a file from a URL
                    url_input = gr.Textbox(label="Enter a URL to download a file", placeholder="https://ejemplo.com/archivo.png")
                    filename_input = gr.Textbox(label="Name of the file to download (with extension)", placeholder="archivo.png")
                    download_button = gr.Button("Download File")
                    download_output = gr.Textbox(label="Path of the Downloaded File", interactive=False)

                    def handle_download(url, filename):
                        local_file = descargar_de_url(url, filename)
                        return local_file

                    download_button.click(handle_download, inputs=[url_input, filename_input], outputs=download_output)

                    gr.Markdown("----")

                    archivos = listar_archivos()
                    archivo_seleccionado = gr.Dropdown(label="Select a file", choices=archivos)
                    boton_refrescar = gr.Button("Refresh Files")
                    boton_descargar = gr.Button("Download File")
                    output_descarga = gr.File(label="Downloaded File")

                    def actualizar_archivos():
                        archivos_actualizados = listar_archivos()
                        return gr.update(choices=archivos_actualizados)  # Return an update with the new choices

                    boton_refrescar.click(fn=actualizar_archivos, inputs=[], outputs=archivo_seleccionado)
                    boton_descargar.click(fn=descargar_archivo, inputs=archivo_seleccionado, outputs=output_descarga)

    return layout


def listen() -> None:
    processors.listen()
    age_modifier_options.listen()
    expression_restorer_options.listen()
    face_debugger_options.listen()
    face_editor_options.listen()
    face_enhancer_options.listen()
    face_swapper_options.listen()
    frame_colorizer_options.listen()
    frame_enhancer_options.listen()
    lip_syncer_options.listen()
    execution.listen()
    execution_thread_count.listen()
    execution_queue_count.listen()
    memory.listen()
    temp_frame.listen()
    output_options.listen()
    source.listen()
    target.listen()
    output.listen()
    instant_runner.listen()
    job_runner.listen()
    job_manager.listen()
    terminal.listen()
    preview.listen()
    trim_frame.listen()
    face_selector.listen()
    face_masker.listen()
    face_detector.listen()
    face_landmarker.listen()
    common_options.listen()


def run(ui: gr.Blocks) -> None:
    concurrency_count = min(8, multiprocessing.cpu_count())  # currently unused
    ui.launch(favicon_path = 'ffff.ico', inbrowser = state_manager.get_item('open_browser'), share=True)
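One detail worth noting in the layout above: the refresh callbacks return gr.update(choices=...), so only the Dropdown's choices are replaced while its other properties stay intact. A standalone sketch of that pattern, not tied to the ffff components (the labels here are illustrative only):

import os
import gradio as gr

def list_files():
    # List the regular files in the current working directory
    return [f for f in os.listdir() if os.path.isfile(f)]

with gr.Blocks() as demo:
    file_dropdown = gr.Dropdown(label = 'Files', choices = list_files())
    refresh_button = gr.Button('Refresh')
    # Returning gr.update(...) refreshes the choices without replacing the component
    refresh_button.click(fn = lambda: gr.update(choices = list_files()), outputs = file_dropdown)

# demo.launch()  # uncomment to try the sketch on its own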
Extras/ffff_assets/preview.py
ADDED
@@ -0,0 +1,216 @@
from time import sleep
from typing import List, Optional
import cv2
import gradio
import numpy

from ffff import logger, process_manager, state_manager, wording
from ffff.audio import create_empty_audio_frame, get_audio_frame
from ffff.common_helper import get_first
from ffff.content_analyser import analyse_frame
from ffff.core import conditional_append_reference_faces
from ffff.face_analyser import get_average_face, get_many_faces
from ffff.face_store import clear_reference_faces, clear_static_faces, get_reference_faces
from ffff.filesystem import filter_audio_paths, is_image, is_video
from ffff.processors.core import get_processors_modules
from ffff.typing import AudioFrame, Face, FaceSet, VisionFrame
from ffff.uis.core import get_ui_component, get_ui_components, register_ui_component
from ffff.vision import count_video_frame_total, detect_frame_orientation, get_video_frame, normalize_frame_color, read_static_image, read_static_images, resize_frame_resolution

PREVIEW_IMAGE: Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER: Optional[gradio.Slider] = None
PREVIEW_ORIGINAL_IMAGE: Optional[gradio.Image] = None


def render() -> None:
    global PREVIEW_IMAGE
    global PREVIEW_FRAME_SLIDER
    global PREVIEW_ORIGINAL_IMAGE

    preview_image_args = {
        'label': wording.get('uis.preview_image'),
        'interactive': False
    }
    preview_frame_slider_args = {
        'label': wording.get('uis.preview_frame_slider'),
        'step': 1,
        'minimum': 0,
        'maximum': 100,
        'visible': False
    }

    original_image_args = {
        'label': 'Original Frame',
        'interactive': False,
        'value': None
    }

    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    source_frames = read_static_images(state_manager.get_item('source_paths'))
    source_faces = get_many_faces(source_frames)
    source_face = get_average_face(source_faces)
    source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
    source_audio_frame = create_empty_audio_frame()

    if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'):
        temp_audio_frame = get_audio_frame(source_audio_path, state_manager.get_item('output_video_fps'), state_manager.get_item('reference_frame_number'))
        if numpy.any(temp_audio_frame):
            source_audio_frame = temp_audio_frame

    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
        original_image_args['value'] = normalize_frame_color(target_vision_frame)
    elif is_video(state_manager.get_item('target_path')):
        temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
        original_image_args['value'] = normalize_frame_color(temp_vision_frame)
        preview_image_args['visible'] = True
        preview_frame_slider_args['value'] = state_manager.get_item('reference_frame_number')
        preview_frame_slider_args['maximum'] = count_video_frame_total(state_manager.get_item('target_path'))
        preview_frame_slider_args['visible'] = True

    PREVIEW_IMAGE = gradio.Image(**preview_image_args)
    PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
    PREVIEW_ORIGINAL_IMAGE = gradio.Image(**original_image_args)

    register_ui_component('preview_image', PREVIEW_IMAGE)
    register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
    register_ui_component('preview_original_image', PREVIEW_ORIGINAL_IMAGE)


def listen() -> None:
    PREVIEW_FRAME_SLIDER.release(update_images, inputs=PREVIEW_FRAME_SLIDER, outputs=[PREVIEW_ORIGINAL_IMAGE, PREVIEW_IMAGE])

    reference_face_position_gallery = get_ui_component('reference_face_position_gallery')
    if reference_face_position_gallery:
        reference_face_position_gallery.select(update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)

    for ui_component in get_ui_components(['source_audio', 'source_image', 'target_image', 'target_video']):
        for method in ['upload', 'change', 'clear']:
            getattr(ui_component, method)(update_images, inputs=PREVIEW_FRAME_SLIDER, outputs=[PREVIEW_ORIGINAL_IMAGE, PREVIEW_IMAGE])

    for ui_component in get_ui_components(['target_image', 'target_video']):
        for method in ['upload', 'change', 'clear']:
            getattr(ui_component, method)(update_preview_frame_slider, outputs=PREVIEW_FRAME_SLIDER)

    for ui_component in get_ui_components([
        'face_debugger_items_checkbox_group',
        'frame_colorizer_size_dropdown',
        'face_mask_types_checkbox_group',
        'face_mask_regions_checkbox_group'
    ]):
        ui_component.change(update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)

    for ui_component in get_ui_components([
        'age_modifier_model_dropdown',
        'expression_restorer_model_dropdown',
        'processors_checkbox_group',
        'face_editor_model_dropdown',
        'face_enhancer_model_dropdown',
        'face_swapper_model_dropdown',
        'face_swapper_pixel_boost_dropdown',
        'frame_colorizer_model_dropdown',
        'frame_enhancer_model_dropdown',
        'lip_syncer_model_dropdown',
        'face_selector_mode_dropdown',
        'face_selector_order_dropdown',
        'face_selector_gender_dropdown',
        'face_selector_race_dropdown',
        'face_detector_model_dropdown',
        'face_detector_size_dropdown',
        'face_detector_angles_checkbox_group',
        'face_landmarker_model_dropdown'
    ]):
        ui_component.change(clear_and_update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)

    for ui_component in get_ui_components([
        'face_detector_score_slider',
        'face_landmarker_score_slider'
    ]):
        ui_component.release(clear_and_update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)


def update_images(frame_number: int = 0) -> List[gradio.Image]:
    preview_image = update_preview_image(frame_number)
    original_image = update_original_frame(frame_number)
    return [original_image, preview_image]


def clear_and_update_preview_image(frame_number: int = 0) -> gradio.Image:
    clear_reference_faces()
    clear_static_faces()
    return update_preview_image(frame_number)


def update_preview_image(frame_number: int = 0) -> gradio.Image:
    while process_manager.is_checking():
        sleep(0.5)
    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    source_frames = read_static_images(state_manager.get_item('source_paths'))
    source_faces = get_many_faces(source_frames)
    source_face = get_average_face(source_faces)
    source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
    source_audio_frame = create_empty_audio_frame()

    if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'):
        reference_audio_frame_number = state_manager.get_item('reference_frame_number')
        if state_manager.get_item('trim_frame_start'):
            reference_audio_frame_number -= state_manager.get_item('trim_frame_start')
        temp_audio_frame = get_audio_frame(source_audio_path, state_manager.get_item('output_video_fps'), reference_audio_frame_number)
        if numpy.any(temp_audio_frame):
            source_audio_frame = temp_audio_frame

    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value=preview_vision_frame)

    if is_video(state_manager.get_item('target_path')):
        temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value=preview_vision_frame)

    return gradio.Image(value=None)


def update_original_frame(frame_number: int = 0) -> gradio.Image:
    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        return gradio.Image(value=normalize_frame_color(target_vision_frame))
    if is_video(state_manager.get_item('target_path')):
        temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number)
        return gradio.Image(value=normalize_frame_color(temp_vision_frame))
    return gradio.Image(value=None)


def update_preview_frame_slider() -> gradio.Slider:
    if is_video(state_manager.get_item('target_path')):
        video_frame_total = count_video_frame_total(state_manager.get_item('target_path'))
        return gradio.Slider(maximum=video_frame_total, visible=True)
    return gradio.Slider(value=0, visible=False)


def process_preview_frame(reference_faces: FaceSet, source_face: Face, source_audio_frame: AudioFrame, target_vision_frame: VisionFrame) -> VisionFrame:
    source_vision_frame = target_vision_frame.copy()
    if analyse_frame(target_vision_frame):
        return cv2.GaussianBlur(target_vision_frame, (99, 99), 0)

    for processor_module in get_processors_modules(state_manager.get_item('processors')):
        logger.disable()
        if processor_module.pre_process('preview'):
            target_vision_frame = processor_module.process_frame({
                'reference_faces': reference_faces,
                'source_face': source_face,
                'source_audio_frame': source_audio_frame,
                'source_vision_frame': source_vision_frame,
                'target_vision_frame': target_vision_frame
            })
        logger.enable()
    return target_vision_frame
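A brief note on the event wiring above: update_images returns the original frame and the processed frame as a pair, and it is bound to the slider's release event, so the relatively expensive preview processing only runs once the user lets go of the handle rather than on every tick of a drag. A reduced sketch of the same pattern, with a dummy function standing in for process_preview_frame (everything here is illustrative, not part of the upload):

import gradio
import numpy

def fake_process(frame_number: int = 0):
    # Stand-in for the real processing: return an "original" and a "processed" image
    original = numpy.zeros((64, 64, 3), dtype = numpy.uint8)
    processed = 255 - original
    return [gradio.Image(value = original), gradio.Image(value = processed)]

with gradio.Blocks() as demo:
    frame_slider = gradio.Slider(minimum = 0, maximum = 100, step = 1)
    original_image = gradio.Image(interactive = False)
    preview_image = gradio.Image(interactive = False)
    # release (not change) keeps the heavy work from running while the slider is being dragged
    frame_slider.release(fake_process, inputs = frame_slider, outputs = [original_image, preview_image])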
Extras/ffff_assets/source.py
ADDED
@@ -0,0 +1,129 @@
from typing import List, Optional, Tuple
import gradio
import os
import requests
import tempfile
from PIL import ImageGrab, Image  # To handle images from the clipboard

from ffff import state_manager, wording
from ffff.common_helper import get_first
from ffff.filesystem import filter_audio_paths, filter_image_paths, has_audio, has_image, is_video, get_file_size
from ffff.vision import get_video_frame, normalize_frame_color
from ffff.uis.core import register_ui_component
from ffff.uis.typing import File

FILE_SIZE_LIMIT = 512 * 1024 * 1024

# Global variables that hold the loaded file
SOURCE_FILE: Optional[gradio.File] = None
SOURCE_IMAGE: Optional[gradio.Image] = None
SOURCE_VIDEO: Optional[gradio.Video] = None


def render() -> None:
    global SOURCE_FILE
    global SOURCE_IMAGE
    global SOURCE_VIDEO

    has_source_audio = has_audio(state_manager.get_item('source_paths'))
    has_source_image = has_image(state_manager.get_item('source_paths'))
    has_source_video = is_video(state_manager.get_item('source_paths'))

    SOURCE_FILE = gradio.File(
        file_count='single',
        file_types=['image', 'video'],
        label=wording.get('uis.source_file'),
        value=state_manager.get_item('source_paths') if has_source_audio or has_source_image or has_source_video else None
    )

    source_file_path = state_manager.get_item('source_paths')[0] if state_manager.get_item('source_paths') else None
    source_image_args = {'show_label': False, 'visible': False}
    source_video_args = {'show_label': False, 'visible': False}

    if has_source_image:
        source_image_args['value'] = source_file_path
        source_image_args['visible'] = True

    if has_source_video:
        if get_file_size(source_file_path) > FILE_SIZE_LIMIT:
            preview_vision_frame = normalize_frame_color(get_video_frame(source_file_path))
            source_image_args['value'] = preview_vision_frame
            source_image_args['visible'] = True
        else:
            source_video_args['value'] = source_file_path
            source_video_args['visible'] = True

    SOURCE_IMAGE = gradio.Image(**source_image_args)
    SOURCE_VIDEO = gradio.Video(**source_video_args)

    register_ui_component('source_image', SOURCE_IMAGE)
    register_ui_component('source_video', SOURCE_VIDEO)


def listen() -> None:
    SOURCE_FILE.change(update, inputs=SOURCE_FILE, outputs=[SOURCE_IMAGE, SOURCE_VIDEO])


def update(file: File) -> Tuple[gradio.Image, gradio.Video]:
    if file and has_image([file.name]):
        state_manager.set_item('source_paths', [file.name])
        return gradio.Image(value=file.name, visible=True), gradio.Video(value=None, visible=False)
    if file and is_video(file.name):
        state_manager.set_item('source_paths', [file.name])
        if get_file_size(file.name) > FILE_SIZE_LIMIT:
            preview_vision_frame = normalize_frame_color(get_video_frame(file.name))
            return gradio.Image(value=preview_vision_frame, visible=True), gradio.Video(value=None, visible=False)
        return gradio.Image(value=None, visible=False), gradio.Video(value=file.name, visible=True)
    state_manager.clear_item('source_paths')
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)


# Function to download a file from a URL
def download_file_from_url(url: str) -> Optional[str]:
    try:
        response = requests.get(url, stream=True)
        response.raise_for_status()
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(url)[-1])
        with open(temp_file.name, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        return temp_file.name
    except Exception as e:
        print(f"Error downloading the file: {e}")
        return None


def handle_url_input(url: str) -> Tuple[gradio.Image, gradio.Video]:
    downloaded_file_path = download_file_from_url(url)
    if downloaded_file_path:
        state_manager.set_item('source_paths', [downloaded_file_path])
        if has_image([downloaded_file_path]):
            return gradio.Image(value=downloaded_file_path, visible=True), gradio.Video(value=None, visible=False)
        if is_video(downloaded_file_path):
            if get_file_size(downloaded_file_path) > FILE_SIZE_LIMIT:
                preview_vision_frame = normalize_frame_color(get_video_frame(downloaded_file_path))
                return gradio.Image(value=preview_vision_frame, visible=True), gradio.Video(value=None, visible=False)
            return gradio.Image(value=None, visible=False), gradio.Video(value=downloaded_file_path, visible=True)
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)


def handle_paste() -> Tuple[gradio.Image, gradio.Video]:
    try:
        image = ImageGrab.grabclipboard()
        if isinstance(image, Image.Image):
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
            image.save(temp_file.name)
            state_manager.set_item('source_paths', [temp_file.name])
            return gradio.Image(value=temp_file.name, visible=True), gradio.Video(value=None, visible=False)
    except Exception as e:
        print(f"Error handling the clipboard: {e}")
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)


def cargar_archivo(archivo_seleccionado: str) -> Tuple[gradio.Image, gradio.Video]:
    state_manager.set_item('source_paths', [archivo_seleccionado])
    if has_image([archivo_seleccionado]):
        return gradio.Image(value=archivo_seleccionado, visible=True), gradio.Video(value=None, visible=False)
    if is_video(archivo_seleccionado):
        return gradio.Image(value=None, visible=False), gradio.Video(value=archivo_seleccionado, visible=True)
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)
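One caveat with handle_paste above: PIL's ImageGrab.grabclipboard() can also return a list of file paths (when files rather than pixels were copied, notably on Windows) or None, and only the Image.Image case is handled. A hedged sketch that also accepts a copied image file; the extra branch and the helper name are assumptions for illustration, not part of the upload:

import tempfile
from typing import Optional
from PIL import Image, ImageGrab

def grab_clipboard_image_path() -> Optional[str]:
    # Return a usable image path from the clipboard, or None if nothing suitable is there
    clipboard = ImageGrab.grabclipboard()
    if isinstance(clipboard, Image.Image):
        temp_file = tempfile.NamedTemporaryFile(delete = False, suffix = '.png')
        clipboard.save(temp_file.name)
        return temp_file.name
    if isinstance(clipboard, list) and clipboard:
        # Copied files arrive as a list of paths on some platforms
        return clipboard[0]
    return None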
Extras/ffff_assets/target.py
ADDED
@@ -0,0 +1,132 @@
from typing import Optional, Tuple
import gradio
import os
import tempfile
import requests
from PIL import ImageGrab, Image

from ffff import state_manager, wording
from ffff.face_store import clear_reference_faces, clear_static_faces
from ffff.filesystem import get_file_size, is_image, is_video
from ffff.uis.core import register_ui_component
from ffff.vision import get_video_frame, normalize_frame_color
from ffff.uis.typing import File

FILE_SIZE_LIMIT = 512 * 1024 * 1024

TARGET_FILE: Optional[gradio.File] = None
TARGET_IMAGE: Optional[gradio.Image] = None
TARGET_VIDEO: Optional[gradio.Video] = None


def render() -> None:
    global TARGET_FILE
    global TARGET_IMAGE
    global TARGET_VIDEO

    # Determine whether the target file is an image or a video
    is_target_image = is_image(state_manager.get_item('target_path'))
    is_target_video = is_video(state_manager.get_item('target_path'))

    TARGET_FILE = gradio.File(
        label=wording.get('uis.target_file'),
        file_count='single',
        file_types=['image', 'video'],
        value=state_manager.get_item('target_path') if is_target_image or is_target_video else None
    )

    target_image_options = {'show_label': False, 'visible': False}
    target_video_options = {'show_label': False, 'visible': False}

    if is_target_image:
        target_image_options['value'] = TARGET_FILE.value.get('path')
        target_image_options['visible'] = True

    if is_target_video:
        if get_file_size(state_manager.get_item('target_path')) > FILE_SIZE_LIMIT:
            preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path')))
            target_image_options['value'] = preview_vision_frame
            target_image_options['visible'] = True
        else:
            target_video_options['value'] = TARGET_FILE.value.get('path')
            target_video_options['visible'] = True

    TARGET_IMAGE = gradio.Image(**target_image_options)
    TARGET_VIDEO = gradio.Video(**target_video_options)

    register_ui_component('target_image', TARGET_IMAGE)
    register_ui_component('target_video', TARGET_VIDEO)


def listen() -> None:
    TARGET_FILE.change(fn=update, inputs=TARGET_FILE, outputs=[TARGET_IMAGE, TARGET_VIDEO])


def update(file: File) -> Tuple[gradio.Image, gradio.Video]:
    clear_reference_faces()
    clear_static_faces()
    if file and is_image(file.name):
        state_manager.set_item('target_path', file.name)
        return gradio.Image(value=file.name, visible=True), gradio.Video(value=None, visible=False)
    if file and is_video(file.name):
        state_manager.set_item('target_path', file.name)
        if get_file_size(file.name) > FILE_SIZE_LIMIT:
            preview_vision_frame = normalize_frame_color(get_video_frame(file.name))
            return gradio.Image(value=preview_vision_frame, visible=True), gradio.Video(value=None, visible=False)
        return gradio.Image(value=None, visible=False), gradio.Video(value=file.name, visible=True)
    state_manager.clear_item('target_path')
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)


# Function to download a file from a URL
def download_file_from_url(url: str) -> Optional[str]:
    try:
        response = requests.get(url, stream=True)
        response.raise_for_status()
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(url)[-1])
        with open(temp_file.name, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        return temp_file.name
    except Exception as e:
        print(f"Error downloading the file: {e}")
        return None


# Function to handle the URL input for Target
def handle_url_input(url: str) -> Tuple[gradio.Image, gradio.Video]:
    downloaded_file_path = download_file_from_url(url)
    if downloaded_file_path:
        state_manager.set_item('target_path', downloaded_file_path)
        if is_image(downloaded_file_path):
            return gradio.Image(value=downloaded_file_path, visible=True), gradio.Video(value=None, visible=False)
        if is_video(downloaded_file_path):
            if get_file_size(downloaded_file_path) > FILE_SIZE_LIMIT:
                preview_vision_frame = normalize_frame_color(get_video_frame(downloaded_file_path))
                return gradio.Image(value=preview_vision_frame, visible=True), gradio.Video(value=None, visible=False)
            return gradio.Image(value=None, visible=False), gradio.Video(value=downloaded_file_path, visible=True)
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)


# Function to handle clipboard pasting for Target
def handle_paste() -> Tuple[gradio.Image, gradio.Video]:
    try:
        image = ImageGrab.grabclipboard()
        if isinstance(image, Image.Image):
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
            image.save(temp_file.name)
            state_manager.set_item('target_path', temp_file.name)
            return gradio.Image(value=temp_file.name, visible=True), gradio.Video(value=None, visible=False)
    except Exception as e:
        print(f"Error handling the clipboard: {e}")
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)


# Function to load a selected file into Target
def cargar_archivo(archivo_seleccionado: str) -> Tuple[gradio.Image, gradio.Video]:
    state_manager.set_item('target_path', archivo_seleccionado)
    if is_image(archivo_seleccionado):
        return gradio.Image(value=archivo_seleccionado, visible=True), gradio.Video(value=None, visible=False)
    if is_video(archivo_seleccionado):
        return gradio.Image(value=None, visible=False), gradio.Video(value=archivo_seleccionado, visible=True)
    return gradio.Image(value=None, visible=False), gradio.Video(value=None, visible=False)
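As a closing design note, source.py and target.py carry near-identical copies of the download and clipboard helpers. If that duplication ever becomes a maintenance burden, a small shared module along these lines would let both components reuse one implementation; the module name and placement are suggestions only, not part of this upload:

# Hypothetical shared helper, e.g. ffff/uis/components/remote_media.py (name is a suggestion)
import os
import tempfile
from typing import Optional

import requests

def download_file_from_url(url: str) -> Optional[str]:
    # Stream the file into a named temporary file and return its path, or None on failure
    try:
        response = requests.get(url, stream = True)
        response.raise_for_status()
        temp_file = tempfile.NamedTemporaryFile(delete = False, suffix = os.path.splitext(url)[-1])
        with open(temp_file.name, 'wb') as file:
            for chunk in response.iter_content(chunk_size = 8192):
                file.write(chunk)
        return temp_file.name
    except Exception as exception:
        print(f"Error downloading the file: {exception}")
        return None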