init

Browse files

- .gitignore +37 -0
- .python-version +1 -0
- .vscode/.ruff.toml +32 -0
- .vscode/settings.json +20 -0
- LICENCE +21 -0
- LICENCE-layerdivider +21 -0
- README.md +102 -12
- app.py +165 -0
- input/empty.psd +0 -0
- ldivider/ld_convertor.py +27 -0
- ldivider/ld_processor_fast.py +269 -0
- ldivider/ld_utils.py +57 -0
- output/.gitignore +1 -0
- output/__init__.py +0 -0
- output/tmp/seg_layer/sample.txt +0 -0
- pyproject.toml +18 -0
- requirements.txt +197 -0
- segment_model/sample.txt +0 -0
- uv.lock +0 -0
.gitignore
ADDED
@@ -0,0 +1,37 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+__pycache__/*
+*.py[cod]
+*$py.class
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Jupyter Notebook
+*.ipynb_checkpoints
+*.ipynb:Zone.Identifier
+
+.venv
.python-version
ADDED
@@ -0,0 +1 @@
+3.10
.vscode/.ruff.toml
ADDED
@@ -0,0 +1,32 @@
+exclude = [
+    "**/*.ipynb"
+]
+
+[lint]
+select = [
+    "ANN", # type annotations
+    "D", # docstrings
+    # "B", # bug-prone code
+    "I", # isort
+    "RUF016",
+    "RUF005",
+]
+extend-ignore = [
+    "D100", # disabled because some external packages have no module docstrings
+    "D104", # package docstrings
+    "D107", # __init__ docstring (omitted; the class description goes in the class docstring)
+    "D400", # first line must end with a period; disabled because docstrings are written in Japanese
+    "D203", # blank line required before a class docstring; conflicts with D211, so disabled
+    "D213", # multi-line docstring summary on the second line; conflicts with D212, so disabled
+    "D413", # blank line after the last docstring section
+    "D415", # first line must end with punctuation; disabled because docstrings are written in Japanese
+
+    "F403",
+
+    "ANN202", # return type annotations for private functions
+    "ANN204", # return type annotation for __init__ (always None, so omitted)
+
+    # Already removed in the latest ruff versions
+    "ANN101", # self on methods is implicit, so omitted
+    "ANN102", # cls on classmethods is implicit, so omitted
+]
.vscode/settings.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "editor.defaultFormatter": "esbenp.prettier-vscode",
+  "editor.formatOnSave": true,
+  "editor.codeActionsOnSave": {
+    "source.fixAll.eslint": "explicit",
+    "source.organizeImports": "explicit"
+  },
+  "typescript.preferences.importModuleSpecifier": "non-relative",
+  "[python]": {
+    "editor.defaultFormatter": "charliermarsh.ruff",
+    "editor.codeActionsOnSave": {
+      "source.fixAll": "explicit",
+      "source.organizeImports": "explicit"
+    },
+    "editor.tabSize": 4
+  },
+  "ruff.lint.args": ["--config", "${workspaceFolder}/.vscode/.ruff.toml"],
+  "ruff.organizeImports": false,
+  "files.insertFinalNewline": true
+}
LICENCE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 sk-uma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
LICENCE-layerdivider
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 mattya_monaca
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,102 @@
+# Faster layerdivider
+
+This is an optimized version of part of the processing from the layer-decomposition tool [layerdivider](https://github.com/mattyamonaca/layerdivider) by [mattya_monaca](https://github.com/mattyamonaca).
+
+A tool to divide a single illustration into a layered structure.
+
+
+
+
+https://user-images.githubusercontent.com/48423148/223344286-bf2dff31-3fc5-4970-8d68-86274f1f36eb.mp4
+
+# Install
+
+## Local Install
+
+### Windows Installation
+
+#### Required Dependencies
+
+Python 3.10.8 and Git
+
+#### Install Steps
+
+##### Use uv (recommended)
+
+1. Clone the repository:
+
+```
+git clone https://github.com/mattyamonaca/layerdivider
+```
+
+2. Run `install.ps1` on first use and wait for the installation to complete.
+3. Run `run_gui.ps1` to open the local GUI.
+4. Open localhost:<port> in a browser (the default is localhost:7860).
+
+#### Optional: For Python Launcher Users
+
+If you use the Python launcher (the `py` command) to run Python, use `install_with_launcher.ps1` instead of `install.ps1`:
+`install_with_launcher.ps1` runs the venv module through the `py` command, while `install.ps1` uses the `python` command.
+
+# Processing content
+
+1. Cluster the input image at the pixel level based on its RGB values.
+2. Merge clusters whose colors are similar (based on the CIEDE2000 metric).
+3. Smooth the input image with a blur.
+4. For each cluster, compute the average color after blurring and repaint all of its pixels with that average.
+5. Repeat steps 2-4 a specified number of times.
+6. Create the base layer from the final clustering result.
+7. Repaint each color in the base layer with the per-cluster average color of the input image.
+8. Compute the effect layers from the differences between the base layer and the original colors.
+
+# Parameter Description
+
+- loops: Number of times to repeat steps 2-4.
+- init_cluster: Number of clusters generated in step 1 (the larger the number, the more finely the image is split into layers).
+- ciede_threshold: Threshold used in step 2 to decide how similar two clusters' colors must be before they are merged.
+- blur_size: Size of the blur applied in step 3 (the larger the size, the stronger the blur).
+- output_layer_mode:
+  - normal: Builds a PSD that contains only normal layers.
+  - composite: Builds a PSD that combines normal, screen, multiply, subtract, and addition layers.
+
+# License
+
+Everything other than `pytoshop/*`, as well as `pytoshop/packbits.py`, is under the MIT license; the rest of `pytoshop/*` is under the BSD license.
+See the license files for details.
+
+# Acknowledgements
+
+This project draws on the following code.
+
+## https://github.com/mattyamonaca/layerdivider
+
+The code and the underlying approach of this repository are inherited from layerdivider.
+
+## https://github.com/mdboom/pytoshop
+
+Writing of the PSD file.
+
+## https://github.com/psd-tools/packbits
+
+The original code behind `packbits.py`.
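The README's "Processing content" steps describe the core loop. The snippet below is a minimal, illustrative sketch of steps 1-2 only (pixel-level k-means clustering followed by CIEDE2000-based cluster merging) on a random stand-in image; it is not the commit's implementation, which is `get_base` in `ldivider/ld_processor_fast.py` further down.

```python
import numpy as np
from skimage import color
from sklearn.cluster import MiniBatchKMeans


def merge_similar_clusters(rgb_means: np.ndarray, threshold: float) -> np.ndarray:
    """Map each cluster id to a representative whose mean color is within `threshold` (CIEDE2000)."""
    lab = color.rgb2lab(rgb_means[None, :, :] / 255.0)[0]  # (k, 3) Lab coordinates
    mapping = np.arange(len(rgb_means))
    for i in range(len(rgb_means)):
        for j in range(i + 1, len(rgb_means)):
            if color.deltaE_ciede2000(lab[i], lab[j]) < threshold:
                mapping[j] = mapping[i]  # redirect j onto i's representative
    return mapping


img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in illustration image
pixels = img.reshape(-1, 3).astype(np.float32)
kmeans = MiniBatchKMeans(n_clusters=10).fit(pixels)           # step 1: RGB clustering
merged = merge_similar_clusters(kmeans.cluster_centers_, threshold=5.0)[kmeans.labels_]  # step 2
print(len(np.unique(merged)), "clusters remain after merging")
```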
app.py
ADDED
@@ -0,0 +1,165 @@
+import os
+import sys
+
+import cv2
+import gradio as gr
+
+from ldivider.ld_convertor import cv2pil, pil2cv
+from ldivider.ld_processor_fast import get_base, get_composite_layer, get_normal_layer
+from ldivider.ld_utils import save_psd
+from pytoshop.enums import BlendMode
+
+path = os.getcwd()
+output_dir = f"{path}/output"
+input_dir = f"{path}/input"
+model_dir = f"{path}/segment_model"
+
+
+class webui:
+    def __init__(self):
+        self.demo = gr.Blocks()
+
+    def color_base_divide(
+        self, input_image, loops, init_cluster, ciede_threshold, blur_size, layer_mode
+    ):
+        image = pil2cv(input_image)
+        self.input_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
+
+        base_image, label = get_base(
+            self.input_image, loops, init_cluster, ciede_threshold, blur_size
+        )
+
+        image = cv2pil(image)
+        if layer_mode == "composite":
+            (
+                base_layer_list,
+                shadow_layer_list,
+                bright_layer_list,
+                addition_layer_list,
+                subtract_layer_list,
+            ) = get_composite_layer(self.input_image, base_image, label)
+            filename = save_psd(
+                self.input_image,
+                [
+                    base_layer_list,
+                    bright_layer_list,
+                    shadow_layer_list,
+                    subtract_layer_list,
+                    addition_layer_list,
+                ],
+                ["base", "screen", "multiply", "subtract", "addition"],
+                [
+                    BlendMode.normal,
+                    BlendMode.screen,
+                    BlendMode.multiply,
+                    BlendMode.subtract,
+                    BlendMode.linear_dodge,
+                ],
+                output_dir,
+                layer_mode,
+            )
+            base_layer_list = [cv2pil(layer) for layer in base_layer_list]
+            return (
+                [image, base_image],
+                base_layer_list,
+                bright_layer_list,
+                shadow_layer_list,
+                filename,
+            )
+        elif layer_mode == "normal":
+            base_layer_list, bright_layer_list, shadow_layer_list = get_normal_layer(
+                self.input_image, base_image, label
+            )
+            filename = save_psd(
+                self.input_image,
+                [base_layer_list, bright_layer_list, shadow_layer_list],
+                ["base", "bright", "shadow"],
+                [BlendMode.normal, BlendMode.normal, BlendMode.normal],
+                output_dir,
+                layer_mode,
+            )
+            return (
+                [image, base_image],
+                base_layer_list,
+                bright_layer_list,
+                shadow_layer_list,
+                filename,
+            )
+        else:
+            return None
+
+    def launch(self, share):
+        with self.demo:
+            with gr.Row():
+                with gr.Column():
+                    input_image = gr.Image(type="pil")
+                    with gr.Accordion("ColorBase Settings", open=True):
+                        loops = gr.Slider(
+                            1, 20, value=1, step=1, label="loops", show_label=True
+                        )
+                        init_cluster = gr.Slider(
+                            1,
+                            50,
+                            value=10,
+                            step=1,
+                            label="init_cluster",
+                            show_label=True,
+                        )
+                        ciede_threshold = gr.Slider(
+                            1,
+                            50,
+                            value=5,
+                            step=1,
+                            label="ciede_threshold",
+                            show_label=True,
+                        )
+                        blur_size = gr.Slider(
+                            1, 20, value=5, label="blur_size", show_label=True
+                        )
+                        layer_mode = gr.Dropdown(
+                            ["normal", "composite"],
+                            value="normal",
+                            label="output_layer_mode",
+                            show_label=True,
+                        )
+
+                    submit = gr.Button(value="Create PSD")
+            with gr.Row():
+                with gr.Column():
+                    with gr.Tab("output"):
+                        output_0 = gr.Gallery()
+                    with gr.Tab("base"):
+                        output_1 = gr.Gallery()
+                    with gr.Tab("bright"):
+                        output_2 = gr.Gallery()
+                    with gr.Tab("shadow"):
+                        output_3 = gr.Gallery()
+
+                    output_file = gr.File()
+
+            submit.click(
+                self.color_base_divide,
+                inputs=[
+                    input_image,
+                    loops,
+                    init_cluster,
+                    ciede_threshold,
+                    blur_size,
+                    layer_mode,
+                ],
+                outputs=[output_0, output_1, output_2, output_3, output_file],
+            )
+
+        self.demo.queue()
+        self.demo.launch(share=share)
+
+
+if __name__ == "__main__":
+    ui = webui()
+    if len(sys.argv) > 1:
+        if sys.argv[1] == "share":
+            ui.launch(share=True)
+        else:
+            ui.launch(share=False)
+    else:
+        ui.launch(share=False)
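app.py only wires this pipeline into a Gradio UI; the same functions can be called directly. The sketch below is a hypothetical headless run, assuming an RGBA-readable file at `input/sample.png` (not part of this commit) and the repository root as the working directory.

```python
import cv2
from pytoshop.enums import BlendMode

from ldivider.ld_processor_fast import get_base, get_normal_layer
from ldivider.ld_utils import save_psd

# Load as BGRA and convert to the RGBA layout the pipeline expects.
img = cv2.cvtColor(cv2.imread("input/sample.png", cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA)

base, label = get_base(img, loop=1, cls_num=10, threshold=5, size=5)
base_layers, bright_layers, shadow_layers = get_normal_layer(img, base, label)

psd_path = save_psd(
    img,
    [base_layers, bright_layers, shadow_layers],
    ["base", "bright", "shadow"],
    [BlendMode.normal, BlendMode.normal, BlendMode.normal],
    "output",
    "normal",
)
print("PSD written to", psd_path)
```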
input/empty.psd
ADDED
ldivider/ld_convertor.py
ADDED
@@ -0,0 +1,27 @@
+import numpy as np
+from PIL import Image
+
+
+def pil2cv(image: Image.Image) -> np.ndarray:
+    """PIL Image to OpenCV image"""
+    new_image = np.array(image, dtype=np.uint8)
+    if new_image.ndim == 2:
+        pass
+    elif new_image.shape[2] == 3:
+        new_image = new_image[:, :, ::-1]
+    elif new_image.shape[2] == 4:
+        new_image = new_image[:, :, [2, 1, 0, 3]]
+    return new_image
+
+
+def cv2pil(image: np.ndarray) -> Image.Image:
+    """OpenCV image to PIL Image"""
+    new_image = image.copy()
+    if new_image.ndim == 2:
+        pass
+    elif new_image.shape[2] == 3:
+        new_image = new_image[:, :, ::-1]
+    elif new_image.shape[2] == 4:
+        new_image = new_image[:, :, [2, 1, 0, 3]]
+    new_image = Image.fromarray(new_image)
+    return new_image
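A quick sanity check of the two converters above: `pil2cv` reorders RGBA into the BGRA layout OpenCV uses, and `cv2pil` reverses it, so a round trip is lossless.

```python
import numpy as np
from PIL import Image

from ldivider.ld_convertor import cv2pil, pil2cv

pil_img = Image.new("RGBA", (4, 4), (255, 0, 0, 255))  # opaque red
cv_img = pil2cv(pil_img)                               # BGRA: blue channel first
assert cv_img[0, 0].tolist() == [0, 0, 255, 255]
assert np.array_equal(np.array(cv2pil(cv_img)), np.array(pil_img))  # lossless round trip
```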
ldivider/ld_processor_fast.py
ADDED
@@ -0,0 +1,269 @@
+import random
+from typing import List, Tuple
+
+import cv2
+import numpy as np
+from skimage import color
+from sklearn.cluster import MiniBatchKMeans
+from sklearn.utils import shuffle
+
+
+def _fix_seed(seed: int) -> None:
+    random.seed(seed)
+    np.random.seed(seed)
+
+
+SEED = 42
+_fix_seed(SEED)
+
+
+def _get_new_group(rgb_means: np.ndarray, threshold: int):
+    merge_target = []
+    lab_means = color.rgb2lab(rgb_means, channel_axis=1)
+    for i in range(len(rgb_means)):
+        for j in range(i + 1, len(rgb_means)):
+            distance = color.deltaE_ciede2000(lab_means[i], lab_means[j])
+            if distance < threshold:
+                merge_target.append((i, j))
+    merge_dict = {k: v for k, v in enumerate(range(len(lab_means)))}
+    for a, b in merge_target:
+        a = merge_dict[a]
+        merge_dict[b] = a
+    new_group_keys = {k: v for v, k in enumerate(set(merge_dict.values()))}
+    groups = {k: [] for k in new_group_keys.values()}
+    for k in merge_dict.keys():
+        merge_dict[k] = new_group_keys[merge_dict[k]]
+        groups[merge_dict[k]].append(k)
+    return merge_dict, groups
+
+
+def _get_rgb_means(
+    img: np.ndarray,
+    labels: np.ndarray,
+    label_counts: int,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """Compute the mean color of each cluster."""
+    cls = np.arange(label_counts)
+
+    masks = np.bitwise_and(img[:, :, 3] > 127, cls.reshape(-1, 1, 1) == labels)
+
+    cls_counts = masks.sum(axis=(1, 2))  # number of pixels in each cluster
+    cls_sum = (img[:, :, :3] * masks[:, :, :, None]).sum(
+        axis=(1, 2)
+    )  # sum of RGB values for each cluster
+    rgb_means = cls_sum / (cls_counts[:, None] + 1e-6)  # mean RGB value for each cluster
+
+    return rgb_means, cls_counts, masks
+
+
+def get_base(
+    img: np.ndarray,
+    loop: int,
+    cls_num: int,
+    threshold: int,
+    size: int,
+    kmeans_samples: int = -1,
+) -> Tuple[np.ndarray, np.ndarray]:
+    """Cluster the image, compute per-cluster mean colors, and merge clusters with similar colors.
+
+    Parameters
+    ----------
+    img : np.ndarray
+        Input image
+    loop : int
+        Number of loops
+    cls_num : int
+        Number of clusters
+    threshold : int
+        Merge threshold
+    size : int
+        Blur size
+    kmeans_samples : int, optional
+        Number of k-means samples, by default -1
+    """
+    rgb_flatten = cluster_samples = img[..., :3].reshape((-1, 3))
+    im_h, im_w = img.shape[:2]
+
+    alpha_mask = np.where(img[..., 3] > 127)
+    resampling = False
+    if rgb_flatten.shape[0] > len(alpha_mask[0]):
+        # If the image has transparent areas, sample only the opaque pixels
+        cluster_samples = img[..., :3][alpha_mask].reshape((-1, 3))
+        resampling = True
+
+    if len(rgb_flatten) > kmeans_samples and kmeans_samples > 0:
+        # If a k-means sample count is specified, use only a subset of the pixels
+        cluster_samples = shuffle(
+            cluster_samples, random_state=0, n_samples=kmeans_samples
+        )
+        resampling = True
+
+    kmeans = MiniBatchKMeans(n_clusters=cls_num).fit(cluster_samples)
+
+    if resampling:
+        labels = kmeans.predict(rgb_flatten)
+    else:
+        labels = kmeans.labels_
+
+    label_counts = kmeans.n_clusters
+    labels = labels.reshape(im_h, im_w)
+
+    assert loop > 0
+    img_ori = img.copy()
+    for i in range(loop):
+        img = cv2.blur(img, (size, size))
+        rgb_means, cls_counts, _ = _get_rgb_means(img, labels, label_counts)
+        merge_dict, groups = _get_new_group(rgb_means, threshold)
+        label_counts = len(groups)
+        group_means = {}
+        for group_id, label_ids in groups.items():
+            means = rgb_means[label_ids]
+            cnt = cls_counts[label_ids]
+            group_means[group_id] = (means * cnt[:, None]).sum(axis=0) / cnt.sum()
+        for k, v in merge_dict.items():
+            labels[labels == k] = v
+            if i != loop - 1:
+                img[labels == v, :3] = group_means[v]
+
+    img = img_ori
+    rgb_means, cls_counts, masks = _get_rgb_means(img, labels, label_counts)
+    for mask, rgb in zip(masks, rgb_means):
+        img[mask, :3] = rgb
+
+    img = img.clip(0, 255).astype(np.uint8)
+    labels = labels.squeeze().astype(np.uint32)
+    return img, labels
+
+
+def _split_img_batch(
+    images: List[np.ndarray], labels: np.ndarray
+) -> List[List[np.ndarray]]:
+    unique_labels = np.unique(labels)  # unique label classes
+
+    splited_images = [[] for _ in range(len(images))]
+
+    for cls_no in unique_labels:
+        mask = labels == cls_no  # the mask is expanded below to match the image dimensions
+        for i, image in enumerate(images):
+            masked_img = image * mask[:, :, None]
+            splited_images[i].append(masked_img)
+
+    return splited_images
+
+
+def get_normal_layer(
+    input_image: np.ndarray, base_image: np.ndarray, label: np.ndarray
+) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
+    """Build the normal-mode layers."""
+    base_image = base_image.astype(np.int32)
+    input_image = input_image.astype(np.int32)
+
+    base_image_hsv = cv2.cvtColor(
+        base_image[:, :, :3].astype(np.uint8), cv2.COLOR_RGB2HSV
+    )
+    input_image_hsv = cv2.cvtColor(
+        input_image[:, :, :3].astype(np.uint8), cv2.COLOR_RGB2HSV
+    )
+
+    bright_mask = base_image_hsv[:, :, 2] < input_image_hsv[:, :, 2]
+    bright_image = input_image.copy()
+    bright_image[:, :, 3] = bright_image[:, :, 3] * bright_mask
+
+    shadow_mask = base_image_hsv[:, :, 2] >= input_image_hsv[:, :, 2]
+    shadow_image = input_image.copy()
+    shadow_image[:, :, 3] = shadow_image[:, :, 3] * shadow_mask
+
+    [
+        base_layer_list,
+        bright_layer_list,
+        shadow_layer_list,
+    ] = _split_img_batch(
+        np.array(
+            [
+                base_image,
+                bright_image,
+                shadow_image,
+            ]
+        ),
+        label,
+    )
+
+    return (
+        [t.astype(np.uint8) for t in base_layer_list],
+        [t.astype(np.uint8) for t in bright_layer_list],
+        [t.astype(np.uint8) for t in shadow_layer_list],
+    )
+
+
+def get_composite_layer(
+    input_image: np.ndarray, base_image: np.ndarray, label: np.ndarray
+) -> Tuple[
+    List[np.ndarray],
+    List[np.ndarray],
+    List[np.ndarray],
+    List[np.ndarray],
+    List[np.ndarray],
+]:
+    """Build the composite layers."""
+    base_image = base_image.astype(np.int32)
+    input_image = input_image.astype(np.int32)
+
+    diff_image = base_image - input_image
+
+    # Shadow
+    shadow_mask = (diff_image[:, :, :3] > 0).all(axis=2)
+    shadow_image = input_image.copy()
+    shadow_image[:, :, 3] = shadow_image[:, :, 3] * shadow_mask
+    shadow_image[:, :, :3] = (shadow_image[:, :, :3] * 255) / base_image[:, :, :3]
+
+    # Screen
+    screen_mask = (diff_image[:, :, :3] < 0).all(axis=2)
+    screen_image = input_image.copy()
+    screen_image[:, :, 3] = screen_image[:, :, 3] * screen_mask
+    screen_image[:, :, :3] = (screen_image[:, :, :3] - base_image[:, :, :3]) / (
+        1 - base_image[:, :, :3] / 255
+    )
+
+    # Residuals
+    residuals_mask = ~shadow_mask & ~screen_mask
+    residuals_image = input_image[:, :, 3].copy()
+    residuals_image = residuals_image * residuals_mask
+
+    # Addition
+    addition_image = input_image.copy()
+    addition_image[:, :, 3] = residuals_image
+    addition_image[:, :, :3] = input_image[:, :, :3] - base_image[:, :, :3]
+    addition_image[:, :, :3] = addition_image[:, :, :3].clip(0, 255)
+
+    # Subtract
+    subtract_image = input_image.copy()
+    subtract_image[:, :, 3] = residuals_image
+    subtract_image[:, :, :3] = base_image[:, :, :3] - input_image[:, :, :3]
+    subtract_image[:, :, :3] = subtract_image[:, :, :3].clip(0, 255)
+
+    [
+        base_layer_list,
+        shadow_layer_list,
+        screen_layer_list,
+        addition_layer_list,
+        subtract_layer_list,
+    ] = _split_img_batch(
+        np.array(
+            [
+                base_image,
+                shadow_image,
+                screen_image,
+                addition_image,
+                subtract_image,
+            ]
+        ),
+        label,
+    )
+
+    return (
+        [t.astype(np.uint8) for t in base_layer_list],
+        [t.astype(np.uint8) for t in shadow_layer_list],
+        [t.astype(np.uint8) for t in screen_layer_list],
+        [t.astype(np.uint8) for t in addition_layer_list],
+        [t.astype(np.uint8) for t in subtract_layer_list],
+    )
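To make the broadcasting in `_get_rgb_means` concrete, here is a tiny worked example with hypothetical values: a 2×2 opaque RGBA image split into two clusters, one per row.

```python
import numpy as np

img = np.zeros((2, 2, 4), dtype=np.float64)
img[..., 3] = 255                      # fully opaque
img[0, :, :3] = [255, 0, 0]            # top row red
img[1, :, :3] = [0, 0, 255]            # bottom row blue
labels = np.array([[0, 0], [1, 1]])    # cluster 0 = top row, cluster 1 = bottom row

cls = np.arange(2)
# One boolean mask per cluster, restricted to opaque pixels: shape (2, 2, 2).
masks = np.bitwise_and(img[:, :, 3] > 127, cls.reshape(-1, 1, 1) == labels)
cls_counts = masks.sum(axis=(1, 2))                                # [2, 2] pixels per cluster
cls_sum = (img[:, :, :3] * masks[:, :, :, None]).sum(axis=(1, 2))  # per-cluster RGB sums
rgb_means = cls_sum / (cls_counts[:, None] + 1e-6)
print(rgb_means.round(3))  # approximately [[255, 0, 0], [0, 0, 255]]
```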
ldivider/ld_utils.py
ADDED
@@ -0,0 +1,57 @@
+import random
+import string
+
+import numpy as np
+
+import pytoshop
+from pytoshop import layers
+
+
+def randomname(n):
+    randlst = [random.choice(string.ascii_letters + string.digits) for i in range(n)]
+    return "".join(randlst)
+
+
+def add_psd(psd: pytoshop.core.PsdFile, img, name, mode):
+    layer_1 = layers.ChannelImageData(image=img[:, :, 3], compression=1)
+    layer0 = layers.ChannelImageData(image=img[:, :, 0], compression=1)
+    layer1 = layers.ChannelImageData(image=img[:, :, 1], compression=1)
+    layer2 = layers.ChannelImageData(image=img[:, :, 2], compression=1)
+
+    new_layer = layers.LayerRecord(
+        channels={-1: layer_1, 0: layer0, 1: layer1, 2: layer2},
+        top=0,
+        bottom=img.shape[0],
+        left=0,
+        right=img.shape[1],
+        blend_mode=mode,
+        name=name,
+        opacity=255,
+    )
+    psd.layer_and_mask_info.layer_info.layer_records.append(new_layer)
+    return psd
+
+
+def save_psd(input_image: np.ndarray, layers, names, modes, output_dir, layer_mode):
+    psd = pytoshop.core.PsdFile(
+        num_channels=3, height=input_image.shape[0], width=input_image.shape[1]
+    )
+    if layer_mode == "normal":
+        for idx, output in enumerate(layers[0]):
+            psd = add_psd(psd, layers[0][idx], names[0] + str(idx), modes[0])
+            psd = add_psd(psd, layers[1][idx], names[1] + str(idx), modes[1])
+            psd = add_psd(psd, layers[2][idx], names[2] + str(idx), modes[2])
+    else:
+        for idx, output in enumerate(layers[0]):
+            psd = add_psd(psd, layers[0][idx], names[0] + str(idx), modes[0])
+            psd = add_psd(psd, layers[1][idx], names[1] + str(idx), modes[1])
+            psd = add_psd(psd, layers[2][idx], names[2] + str(idx), modes[2])
+            psd = add_psd(psd, layers[3][idx], names[3] + str(idx), modes[3])
+            psd = add_psd(psd, layers[4][idx], names[4] + str(idx), modes[4])
+
+    name = randomname(10)
+
+    with open(f"{output_dir}/output_{name}.psd", "wb") as fd2:
+        psd.write(fd2)
+
+    return f"{output_dir}/output_{name}.psd"
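For reference, a hypothetical standalone call to `save_psd` with two synthetic 8×8 RGBA layers; in pytoshop's `LayerRecord`, channel -1 is alpha and channels 0/1/2 are R/G/B, which is why `add_psd` splits the array that way. Run from the repository root so that `output/` exists.

```python
import numpy as np
from pytoshop.enums import BlendMode

from ldivider.ld_utils import save_psd

red = np.zeros((8, 8, 4), dtype=np.uint8)
red[..., [0, 3]] = 255      # opaque red layer
green = np.zeros((8, 8, 4), dtype=np.uint8)
green[..., [1, 3]] = 255    # opaque green layer

psd_path = save_psd(
    red,                           # only used for the canvas size
    [[red], [green], [green]],     # "normal" mode expects three layer lists
    ["base", "bright", "shadow"],
    [BlendMode.normal] * 3,
    "output",
    "normal",
)
print(psd_path)
```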
output/.gitignore
ADDED
@@ -0,0 +1 @@
+*.psd
output/__init__.py
ADDED
File without changes
output/tmp/seg_layer/sample.txt
ADDED
File without changes
pyproject.toml
ADDED
@@ -0,0 +1,18 @@
+[project]
+name = "layerdivider"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "gradio==5.6.0",
+    "gradio-client==1.4.3",
+    "numba>=0.61.0",
+    "numpy>=2",
+    "opencv-python>=4.11.0.86",
+    "pillow>=11.1.0",
+    "pytoshop-whl>=0.1.0",
+    "scikit-image>=0.25.1",
+    "scikit-learn>=1.6.1",
+    "setuptools>=75.8.0",
+]
requirements.txt
ADDED
@@ -0,0 +1,197 @@
+# This file was autogenerated by uv via the following command:
+#    uv pip compile pyproject.toml -o requirements.txt
+aiofiles==23.2.1
+    # via gradio
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.9.0
+    # via
+    #   gradio
+    #   httpx
+    #   starlette
+certifi==2025.1.31
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+charset-normalizer==3.4.1
+    # via requests
+click==8.1.8
+    # via
+    #   typer
+    #   uvicorn
+fastapi==0.115.12
+    # via gradio
+ffmpy==0.5.0
+    # via gradio
+filelock==3.18.0
+    # via huggingface-hub
+fsspec==2025.3.2
+    # via
+    #   gradio-client
+    #   huggingface-hub
+gradio==5.6.0
+    # via layerdivider (pyproject.toml)
+gradio-client==1.4.3
+    # via
+    #   layerdivider (pyproject.toml)
+    #   gradio
+h11==0.14.0
+    # via
+    #   httpcore
+    #   uvicorn
+httpcore==1.0.8
+    # via httpx
+httpx==0.28.1
+    # via
+    #   gradio
+    #   gradio-client
+    #   safehttpx
+huggingface-hub==0.30.2
+    # via
+    #   gradio
+    #   gradio-client
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+imageio==2.37.0
+    # via scikit-image
+jinja2==3.1.6
+    # via gradio
+joblib==1.4.2
+    # via scikit-learn
+lazy-loader==0.4
+    # via scikit-image
+llvmlite==0.44.0
+    # via numba
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==2.1.5
+    # via
+    #   gradio
+    #   jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+networkx==3.4.2
+    # via scikit-image
+numba==0.61.2
+    # via layerdivider (pyproject.toml)
+numpy==2.2.4
+    # via
+    #   layerdivider (pyproject.toml)
+    #   gradio
+    #   imageio
+    #   numba
+    #   opencv-python
+    #   pandas
+    #   pytoshop-whl
+    #   scikit-image
+    #   scikit-learn
+    #   scipy
+    #   tifffile
+opencv-python==4.11.0.86
+    # via layerdivider (pyproject.toml)
+orjson==3.10.16
+    # via gradio
+packaging==24.2
+    # via
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   lazy-loader
+    #   scikit-image
+pandas==1.5.3
+    # via gradio
+pillow==11.1.0
+    # via
+    #   layerdivider (pyproject.toml)
+    #   gradio
+    #   imageio
+    #   scikit-image
+pydantic==2.11.3
+    # via
+    #   fastapi
+    #   gradio
+pydantic-core==2.33.1
+    # via pydantic
+pydub==0.25.1
+    # via gradio
+pygments==2.19.1
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-multipart==0.0.12
+    # via gradio
+pytoshop-whl==0.1.0
+    # via layerdivider (pyproject.toml)
+pytz==2025.2
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   gradio
+    #   huggingface-hub
+requests==2.32.3
+    # via huggingface-hub
+rich==14.0.0
+    # via typer
+ruff==0.11.5
+    # via gradio
+safehttpx==0.1.6
+    # via gradio
+scikit-image==0.25.2
+    # via layerdivider (pyproject.toml)
+scikit-learn==1.6.1
+    # via layerdivider (pyproject.toml)
+scipy==1.15.2
+    # via
+    #   scikit-image
+    #   scikit-learn
+semantic-version==2.10.0
+    # via gradio
+setuptools==78.1.0
+    # via layerdivider (pyproject.toml)
+shellingham==1.5.4
+    # via typer
+six==1.17.0
+    # via
+    #   python-dateutil
+    #   pytoshop-whl
+sniffio==1.3.1
+    # via anyio
+starlette==0.46.1
+    # via
+    #   fastapi
+    #   gradio
+threadpoolctl==3.6.0
+    # via scikit-learn
+tifffile==2025.3.30
+    # via scikit-image
+tomlkit==0.12.0
+    # via gradio
+tqdm==4.63.0
+    # via huggingface-hub
+typer==0.15.2
+    # via gradio
+typing==3.10.0.0
+    # via pytoshop-whl
+typing-extensions==4.13.2
+    # via
+    #   anyio
+    #   fastapi
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   typer
+    #   typing-inspection
+typing-inspection==0.4.0
+    # via pydantic
+urllib3==2.4.0
+    # via requests
+uvicorn==0.34.0
+    # via gradio
+websockets==12.0
+    # via gradio-client
segment_model/sample.txt
ADDED
File without changes
uv.lock
ADDED
The diff for this file is too large to render.