hysts (HF Staff) committed
Commit 51a0099 · 1 Parent(s): 1419872
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: check-executables-have-shebangs
       - id: check-json
@@ -18,13 +18,15 @@ repos:
     hooks:
       - id: docformatter
         args: ["--in-place"]
-  - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.8.4
     hooks:
-      - id: isort
-        args: ["--profile", "black"]
+      - id: ruff
+        args: ["--fix"]
+      - id: ruff-format
+        args: ["--line-length", "119"]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.9.0
+    rev: v1.14.0
     hooks:
       - id: mypy
         args: ["--ignore-missing-imports"]
@@ -35,14 +37,8 @@ repos:
           "types-PyYAML",
           "types-pytz",
         ]
-  - repo: https://github.com/psf/black
-    rev: 24.4.0
-    hooks:
-      - id: black
-        language_version: python3.10
-        args: ["--line-length", "119"]
   - repo: https://github.com/kynan/nbstripout
-    rev: 0.7.1
+    rev: 0.8.1
     hooks:
       - id: nbstripout
         args:
@@ -51,7 +47,7 @@ repos:
           "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
         ]
   - repo: https://github.com/nbQA-dev/nbQA
-    rev: 1.8.5
+    rev: 1.9.1
     hooks:
       - id: nbqa-black
       - id: nbqa-pyupgrade
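The isort and black hooks are dropped and every remaining rev is bumped; both jobs move to astral-sh/ruff-pre-commit, where the `ruff` hook lints with autofix (which covers import sorting) and `ruff-format` formats at the same 119-character limit black used. A minimal sketch of the equivalent manual invocation, assuming ruff is on PATH (the hook ids map to the `check` and `format` subcommands):

```python
import subprocess

# Roughly what the two new hooks run against changed files at commit time:
# lint with safe autofixes (import sorting included, replacing isort)...
subprocess.run(["ruff", "check", "--fix", "."], check=False)
# ...then format (replacing black) at the configured line length.
subprocess.run(["ruff", "format", "--line-length", "119", "."], check=False)
```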
.python-version ADDED
@@ -0,0 +1 @@
+3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+{
+  "recommendations": [
+    "ms-python.python",
+    "charliermarsh.ruff",
+    "streetsidesoftware.code-spell-checker",
+    "tamasfe.even-better-toml"
+  ]
+}
.vscode/settings.json CHANGED
@@ -2,29 +2,20 @@
   "editor.formatOnSave": true,
   "files.insertFinalNewline": false,
   "[python]": {
-    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.defaultFormatter": "charliermarsh.ruff",
     "editor.formatOnType": true,
     "editor.codeActionsOnSave": {
+      "source.fixAll.ruff": "explicit",
       "source.organizeImports": "explicit"
     }
   },
   "[jupyter]": {
     "files.insertFinalNewline": false
   },
-  "black-formatter.args": [
-    "--line-length=119"
-  ],
-  "isort.args": ["--profile", "black"],
-  "flake8.args": [
-    "--max-line-length=119"
-  ],
-  "ruff.lint.args": [
-    "--line-length=119"
-  ],
   "notebook.output.scrolling": true,
   "notebook.formatOnCellExecution": true,
   "notebook.formatOnSave.enabled": true,
-  "notebook.codeActionsOnSave": {
-    "source.organizeImports": "explicit"
-  }
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
 }
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 😻
 colorFrom: gray
 colorTo: green
 sdk: gradio
-sdk_version: 4.42.0
+sdk_version: 5.9.1
 app_file: app.py
 pinned: false
 license: other
app.py CHANGED
@@ -1,7 +1,5 @@
 #!/usr/bin/env python
 
-from __future__ import annotations
-
 import os
 import random
 
@@ -23,7 +21,7 @@ MAX_SEED = np.iinfo(np.int32).max
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
+        seed = random.randint(0, MAX_SEED)  # noqa: S311
     return seed
 
 
@@ -34,7 +32,7 @@ if torch.cuda.is_available():
 
 
 @spaces.GPU
-def run(
+def run(  # noqa: PLR0911
     mode: str,
     prompt: str,
     image: PIL.Image.Image | None,
@@ -49,23 +47,23 @@ def run(
         pipe.set_text_to_image_mode()
         sample = pipe(prompt=prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         return sample.images[0], ""
-    elif mode == "i2t":
+    if mode == "i2t":
         pipe.set_image_to_text_mode()
         sample = pipe(image=image, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         return None, sample.text[0]
-    elif mode == "joint":
+    if mode == "joint":
         pipe.set_joint_mode()
         sample = pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         return sample.images[0], sample.text[0]
-    elif mode == "i":
+    if mode == "i":
         pipe.set_image_mode()
         sample = pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         return sample.images[0], ""
-    elif mode == "t":
+    if mode == "t":
         pipe.set_text_mode()
         sample = pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         return None, sample.text[0]
-    elif mode == "i2t2i":
+    if mode == "i2t2i":
         pipe.set_image_to_text_mode()
         sample = pipe(image=image, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         pipe.set_text_to_image_mode()
@@ -76,7 +74,7 @@ def run(
             generator=generator,
         )
         return sample.images[0], ""
-    elif mode == "t2i2t":
+    if mode == "t2i2t":
         pipe.set_text_to_image_mode()
         sample = pipe(prompt=prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
         pipe.set_image_to_text_mode()
@@ -87,8 +85,7 @@ def run(
             generator=generator,
         )
         return None, sample.text[0]
-    else:
-        raise ValueError
+    raise ValueError
 
 
 def create_demo(mode_name: str) -> gr.Blocks:
@@ -167,7 +164,7 @@ def create_demo(mode_name: str) -> gr.Blocks:
     return demo
 
 
-with gr.Blocks(css="style.css") as demo:
+with gr.Blocks(css_paths="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     gr.DuplicateButton(
         value="Duplicate Space for private use",
pyproject.toml ADDED
@@ -0,0 +1,52 @@
+[project]
+name = "unidiffuser"
+version = "0.1.0"
+description = ""
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "accelerate>=1.2.1",
+    "diffusers>=0.32.1",
+    "gradio>=5.9.1",
+    "hf-transfer>=0.1.8",
+    "spaces>=0.31.1",
+    "torch==2.4.0",
+    "torchvision>=0.19.0",
+    "transformers>=4.47.1",
+]
+
+[tool.ruff]
+line-length = 119
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+    "COM812", # missing-trailing-comma
+    "D203", # one-blank-line-before-class
+    "D213", # multi-line-summary-second-line
+    "E501", # line-too-long
+    "SIM117", # multiple-with-statements
+]
+extend-ignore = [
+    "D100", # undocumented-public-module
+    "D101", # undocumented-public-class
+    "D102", # undocumented-public-method
+    "D103", # undocumented-public-function
+    "D104", # undocumented-public-package
+    "D105", # undocumented-magic-method
+    "D107", # undocumented-public-init
+    "EM101", # raw-string-in-exception
+    "FBT001", # boolean-type-hint-positional-argument
+    "FBT002", # boolean-default-value-positional-argument
+    "PD901", # pandas-df-variable-name
+    "PGH003", # blanket-type-ignore
+    "PLR0913", # too-many-arguments
+    "PLR0915", # too-many-statements
+    "TRY003", # raise-vanilla-args
+]
+unfixable = [
+    "F401", # unused-import
+]
+
+[tool.ruff.format]
+docstring-code-format = true
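With `select = ["ALL"]`, every ruff rule family is enabled, including the flake8-bandit (`S`) checks, which is why app.py needs the per-line `# noqa: S311` on its `random.randint` call. A small sketch of what S311 objects to and the stdlib alternative it points toward (the `secrets` line is illustrative, not part of this commit):

```python
import random
import secrets

MAX_SEED = 2**31 - 1  # same bound as np.iinfo(np.int32).max in app.py

# Fine for a reproducible UI seed; S311 only warns against the `random`
# module where cryptographic strength matters.
seed = random.randint(0, MAX_SEED)  # noqa: S311

# What S311 would have you use for security-sensitive randomness:
token = secrets.randbelow(MAX_SEED + 1)
```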
requirements.txt CHANGED
@@ -1,8 +1,260 @@
-accelerate==0.33.0
-diffusers==0.30.1
-gradio==4.42.0
+# This file was autogenerated by uv via the following command:
+#    uv pip compile pyproject.toml -o requirements.txt
+accelerate==1.2.1
+    # via unidiffuser (pyproject.toml)
+aiofiles==23.2.1
+    # via gradio
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.7.0
+    # via
+    #   gradio
+    #   httpx
+    #   starlette
+certifi==2024.12.14
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+charset-normalizer==3.4.1
+    # via requests
+click==8.1.8
+    # via
+    #   typer
+    #   uvicorn
+diffusers==0.32.1
+    # via unidiffuser (pyproject.toml)
+exceptiongroup==1.2.2
+    # via anyio
+fastapi==0.115.6
+    # via gradio
+ffmpy==0.5.0
+    # via gradio
+filelock==3.16.1
+    # via
+    #   diffusers
+    #   huggingface-hub
+    #   torch
+    #   transformers
+    #   triton
+fsspec==2024.12.0
+    # via
+    #   gradio-client
+    #   huggingface-hub
+    #   torch
+gradio==5.9.1
+    # via
+    #   unidiffuser (pyproject.toml)
+    #   spaces
+gradio-client==1.5.2
+    # via gradio
+h11==0.14.0
+    # via
+    #   httpcore
+    #   uvicorn
+hf-transfer==0.1.8
+    # via unidiffuser (pyproject.toml)
+httpcore==1.0.7
+    # via httpx
+httpx==0.28.1
+    # via
+    #   gradio
+    #   gradio-client
+    #   safehttpx
+    #   spaces
+huggingface-hub==0.27.0
+    # via
+    #   accelerate
+    #   diffusers
+    #   gradio
+    #   gradio-client
+    #   tokenizers
+    #   transformers
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+importlib-metadata==8.5.0
+    # via diffusers
+jinja2==3.1.5
+    # via
+    #   gradio
+    #   torch
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==2.1.5
+    # via
+    #   gradio
+    #   jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+mpmath==1.3.0
+    # via sympy
+networkx==3.4.2
+    # via torch
 numpy==1.26.4
-spaces==0.29.3
-torch==2.0.1
-torchvision==0.15.2
-transformers==4.44.2
+    # via
+    #   accelerate
+    #   diffusers
+    #   gradio
+    #   pandas
+    #   torchvision
+    #   transformers
+nvidia-cublas-cu12==12.1.3.1
+    # via
+    #   nvidia-cudnn-cu12
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cuda-cupti-cu12==12.1.105
+    # via torch
+nvidia-cuda-nvrtc-cu12==12.1.105
+    # via torch
+nvidia-cuda-runtime-cu12==12.1.105
+    # via torch
+nvidia-cudnn-cu12==9.1.0.70
+    # via torch
+nvidia-cufft-cu12==11.0.2.54
+    # via torch
+nvidia-curand-cu12==10.3.2.106
+    # via torch
+nvidia-cusolver-cu12==11.4.5.107
+    # via torch
+nvidia-cusparse-cu12==12.1.0.106
+    # via
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-nccl-cu12==2.20.5
+    # via torch
+nvidia-nvjitlink-cu12==12.6.85
+    # via
+    #   nvidia-cusolver-cu12
+    #   nvidia-cusparse-cu12
+nvidia-nvtx-cu12==12.1.105
+    # via torch
+orjson==3.10.13
+    # via gradio
+packaging==24.2
+    # via
+    #   accelerate
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   spaces
+    #   transformers
+pandas==2.2.3
+    # via gradio
+pillow==11.0.0
+    # via
+    #   diffusers
+    #   gradio
+    #   torchvision
+psutil==5.9.8
+    # via
+    #   accelerate
+    #   spaces
+pydantic==2.10.4
+    # via
+    #   fastapi
+    #   gradio
+    #   spaces
+pydantic-core==2.27.2
+    # via pydantic
+pydub==0.25.1
+    # via gradio
+pygments==2.18.0
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-multipart==0.0.20
+    # via gradio
+pytz==2024.2
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   accelerate
+    #   gradio
+    #   huggingface-hub
+    #   transformers
+regex==2024.11.6
+    # via
+    #   diffusers
+    #   transformers
+requests==2.32.3
+    # via
+    #   diffusers
+    #   huggingface-hub
+    #   spaces
+    #   transformers
+rich==13.9.4
+    # via typer
+ruff==0.8.4
+    # via gradio
+safehttpx==0.1.6
+    # via gradio
+safetensors==0.4.5
+    # via
+    #   accelerate
+    #   diffusers
+    #   transformers
+semantic-version==2.10.0
+    # via gradio
+shellingham==1.5.4
+    # via typer
+six==1.17.0
+    # via python-dateutil
+sniffio==1.3.1
+    # via anyio
+spaces==0.31.1
+    # via unidiffuser (pyproject.toml)
+starlette==0.41.3
+    # via
+    #   fastapi
+    #   gradio
+sympy==1.13.3
+    # via torch
+tokenizers==0.21.0
+    # via transformers
+tomlkit==0.13.2
+    # via gradio
+torch==2.4.0
+    # via
+    #   unidiffuser (pyproject.toml)
+    #   accelerate
+    #   torchvision
+torchvision==0.19.0
+    # via unidiffuser (pyproject.toml)
+tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   transformers
+transformers==4.47.1
+    # via unidiffuser (pyproject.toml)
+triton==3.0.0
+    # via torch
+typer==0.15.1
+    # via gradio
+typing-extensions==4.12.2
+    # via
+    #   anyio
+    #   fastapi
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   rich
+    #   spaces
+    #   torch
+    #   typer
+    #   uvicorn
+tzdata==2024.2
+    # via pandas
+urllib3==2.3.0
+    # via requests
+uvicorn==0.34.0
+    # via gradio
+websockets==14.1
+    # via gradio-client
+zipp==3.21.0
+    # via importlib-metadata
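requirements.txt is no longer hand-pinned: per its own header, it is compiled from pyproject.toml with uv, so dependency bumps now happen in one place and the file is regenerated. A sketch of regenerating it programmatically, assuming the `uv` CLI is installed (this just wraps the command from the file's autogeneration header):

```python
import subprocess

# Re-run the compile command recorded in requirements.txt's header.
subprocess.run(
    ["uv", "pip", "compile", "pyproject.toml", "-o", "requirements.txt"],
    check=True,
)
```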
uv.lock ADDED
The diff for this file is too large to render. See raw diff