ginipick committed on
Commit 7d12b68 · verified · 1 Parent(s): 89af645

Delete app.py

Files changed (1)
  1. app.py +0 -334
app.py DELETED
@@ -1,334 +0,0 @@
- import os
- # ZeroGPU environment setup - this must run before anything else!
- os.environ['CUDA_VISIBLE_DEVICES'] = ''
- os.environ['ZEROGPU'] = '1'  # mark that this is a ZeroGPU environment
-
- # Force the use of safetensors
- os.environ['SAFETENSORS_FAST_GPU'] = '1'
- os.environ['TRANSFORMERS_OFFLINE'] = '0'
- os.environ['TRANSFORMERS_USE_SAFETENSORS'] = '1'
-
- # Patch transformers to add missing SiglipImageProcessor
- import sys
- from types import ModuleType
-
- # Create mock for SiglipImageProcessor before importing transformers
- if 'transformers' not in sys.modules:
-     # Create a dummy SiglipImageProcessor class
-     class DummySiglipImageProcessor:
-         def __init__(self, *args, **kwargs):
-             pass
-
-     class DummySiglipVisionModel:
-         def __init__(self, *args, **kwargs):
-             pass
-
-     # Pre-patch transformers module
-     transformers_module = ModuleType('transformers')
-     transformers_module.SiglipImageProcessor = DummySiglipImageProcessor
-     transformers_module.SiglipVisionModel = DummySiglipVisionModel
-
-     # Add to sys.modules
-     sys.modules['transformers'] = transformers_module
-
- import spaces  # import spaces only after the environment is configured
- import shlex
- import subprocess
-
- # Work around library version compatibility issues
- subprocess.run(shlex.split("pip install pip==24.0"), check=True)
-
- # Install safetensors
- subprocess.run(shlex.split("pip install safetensors --upgrade"), check=True)
-
- subprocess.run(
-     shlex.split(
-         "pip install package/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl --force-reinstall --no-deps"
-     ), check=True
- )
- subprocess.run(
-     shlex.split(
-         "pip install package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps"
-     ), check=True
- )
-
- # Import transformers and patch it
- import transformers
-
- # Add missing classes if they don't exist
- if not hasattr(transformers, 'SiglipImageProcessor'):
-     class SiglipImageProcessor:
-         def __init__(self, *args, **kwargs):
-             # Fallback to CLIPImageProcessor
-             self._processor = transformers.CLIPImageProcessor(*args, **kwargs)
-
-         def __getattr__(self, name):
-             return getattr(self._processor, name)
-
-     transformers.SiglipImageProcessor = SiglipImageProcessor
-
- if not hasattr(transformers, 'SiglipVisionModel'):
-     class SiglipVisionModel:
-         def __init__(self, *args, **kwargs):
-             # Fallback to CLIPVisionModel
-             from transformers import CLIPVisionModel
-             self._model = CLIPVisionModel(*args, **kwargs)
-
-         def __getattr__(self, name):
-             return getattr(self._model, name)
-
-         @classmethod
-         def from_pretrained(cls, *args, **kwargs):
-             instance = cls.__new__(cls)
-             from transformers import CLIPVisionModel
-             instance._model = CLIPVisionModel.from_pretrained(*args, **kwargs)
-             return instance
-
-     transformers.SiglipVisionModel = SiglipVisionModel
-
- # Download the model checkpoint and configure torch
- if __name__ == "__main__":
-     from huggingface_hub import snapshot_download
-
-     snapshot_download("public-data/Unique3D", repo_type="model", local_dir="./ckpt")
-
-     import os
-     import sys
-     sys.path.append(os.curdir)
-     import torch
-     torch.set_float32_matmul_precision('medium')
-     torch.backends.cuda.matmul.allow_tf32 = True
-     torch.set_grad_enabled(False)
-
- import fire
- import gradio as gr
- from gradio_app.gradio_3dgen import create_ui as create_3d_ui
- from gradio_app.all_models import model_zoo
-
- # ===============================
- # Text-to-image API helper functions
- # ===============================
- @spaces.GPU(duration=60)  # allow up to 60 seconds of GPU time
- def text_to_image(height, width, steps, scales, prompt, seed):
-     """
-     Generate an image by calling the external API's /process_and_save_image endpoint with the given parameters.
-     """
-     # Runs while a GPU is allocated
-     os.environ['CUDA_VISIBLE_DEVICES'] = '0'
-
-     from gradio_client import Client
-     client = Client(os.getenv("CLIENT_API"))
-     result = client.predict(
-         height,
-         width,
-         steps,
-         scales,
-         prompt,
-         seed,
-         api_name="/process_and_save_image"
-     )
-     if isinstance(result, dict):
-         return result.get("url", None)
-     else:
-         return result
-
- def update_random_seed():
-     """
-     Fetch a new random seed value from the external API's /update_random_seed endpoint.
-     """
-     from gradio_client import Client
-     client = Client(os.getenv("CLIENT_API"))
-     return client.predict(api_name="/update_random_seed")
-
- # Wrapper for the 3D generation function (GPU decorator applied)
- @spaces.GPU(duration=120)  # 3D generation needs more time
- def generate_3d_wrapper(*args, **kwargs):
-     """Run the 3D generation function in a GPU environment"""
-     os.environ['CUDA_VISIBLE_DEVICES'] = '0'
-     # The actual 3D generation logic runs here
-     # model_zoo functions will be called from here
-     return model_zoo.generate_3d(*args, **kwargs)
-
- _TITLE = '''✨ 3D LLAMA Studio'''
- _DESCRIPTION = '''
- ### Welcome to 3D Llama Studio - Your Advanced 3D Generation Platform
- This platform offers two powerful features:
- 1. **Text/Image to 3D**: Generate detailed 3D models from text descriptions or reference images
- 2. **Text to Styled Image**: Create artistic images that can be used for 3D generation
- *Note: Both English and Korean prompts are supported (영어와 한글 프롬프트 모두 지원됩니다)*
- **Running on ZeroGPU** 🚀
- '''
-
- # CSS styling, adjusted for a light theme
- custom_css = """
- .gradio-container {
-     background-color: #ffffff;
-     color: #333333;
- }
- .tabs {
-     background-color: #f8f9fa;
-     border-radius: 10px;
-     padding: 10px;
-     margin: 10px 0;
-     box-shadow: 0 2px 4px rgba(0,0,0,0.1);
- }
- .input-box {
-     background-color: #ffffff;
-     border: 1px solid #e0e0e0;
-     border-radius: 8px;
-     padding: 15px;
-     margin: 10px 0;
-     box-shadow: 0 1px 3px rgba(0,0,0,0.05);
- }
- .button-primary {
-     background-color: #4a90e2 !important;
-     border: none !important;
-     color: white !important;
-     transition: all 0.3s ease;
- }
- .button-primary:hover {
-     background-color: #357abd !important;
-     transform: translateY(-1px);
- }
- .button-secondary {
-     background-color: #f0f0f0 !important;
-     border: 1px solid #e0e0e0 !important;
-     color: #333333 !important;
-     transition: all 0.3s ease;
- }
- .button-secondary:hover {
-     background-color: #e0e0e0 !important;
- }
- .main-title {
-     color: #2c3e50;
-     font-weight: bold;
-     margin-bottom: 20px;
- }
- .slider-label {
-     color: #2c3e50;
-     font-weight: 500;
- }
- .textbox-input {
-     border: 1px solid #e0e0e0 !important;
-     background-color: #ffffff !important;
- }
- .zerogpu-badge {
-     background-color: #4CAF50;
-     color: white;
-     padding: 5px 10px;
-     border-radius: 5px;
-     font-size: 14px;
-     margin-left: 10px;
- }
- """
-
- # Gradio theme configuration
- def launch():
-     # Initialize models in CPU mode
-     os.environ['CUDA_VISIBLE_DEVICES'] = ''
-     model_zoo.init_models()
-
-     with gr.Blocks(
-         title=_TITLE,
-         css=custom_css,
-         theme=gr.themes.Soft(
-             primary_hue="blue",
-             secondary_hue="slate",
-             neutral_hue="slate",
-             font=["Inter", "Arial", "sans-serif"]
-         )
-     ) as demo:
-
-         with gr.Row():
-             with gr.Column():
-                 gr.Markdown('# ' + _TITLE, elem_classes="main-title")
-             with gr.Column():
-                 gr.HTML('<span class="zerogpu-badge">ZeroGPU Enabled</span>')
-         gr.Markdown(_DESCRIPTION)
-
-         with gr.Tabs() as tabs:
-             with gr.Tab("🎨 Text to Styled Image", elem_classes="tab"):
-                 with gr.Group(elem_classes="input-box"):
-                     gr.Markdown("### Image Generation Settings")
-                     with gr.Row():
-                         with gr.Column():
-                             height_slider = gr.Slider(
-                                 label="Image Height",
-                                 minimum=256,
-                                 maximum=2048,
-                                 step=64,
-                                 value=1024,
-                                 info="Select image height (pixels)"
-                             )
-                             width_slider = gr.Slider(
-                                 label="Image Width",
-                                 minimum=256,
-                                 maximum=2048,
-                                 step=64,
-                                 value=1024,
-                                 info="Select image width (pixels)"
-                             )
-                         with gr.Column():
-                             steps_slider = gr.Slider(
-                                 label="Generation Steps",
-                                 minimum=1,
-                                 maximum=100,
-                                 step=1,
-                                 value=8,
-                                 info="More steps = higher quality but slower"
-                             )
-                             scales_slider = gr.Slider(
-                                 label="Guidance Scale",
-                                 minimum=1.0,
-                                 maximum=10.0,
-                                 step=0.1,
-                                 value=3.5,
-                                 info="How closely to follow the prompt"
-                             )
-
-                 prompt_text = gr.Textbox(
-                     label="Image Description",
-                     placeholder="Enter your prompt here (English or Korean)",
-                     lines=3,
-                     elem_classes="input-box"
-                 )
-
-                 with gr.Row():
-                     seed_number = gr.Number(
-                         label="Seed (Empty = Random)",
-                         value=None,
-                         elem_classes="input-box"
-                     )
-                     update_seed_button = gr.Button(
-                         "🎲 Random Seed",
-                         elem_classes="button-secondary"
-                     )
-
-                 generate_button = gr.Button(
-                     "🚀 Generate Image",
-                     elem_classes="button-primary"
-                 )
-
-                 with gr.Group(elem_classes="input-box"):
-                     gr.Markdown("### Generated Result")
-                     image_output = gr.Image(label="Output Image")
-
-                 update_seed_button.click(
-                     fn=update_random_seed,
-                     inputs=[],
-                     outputs=seed_number
-                 )
-
-                 generate_button.click(
-                     fn=text_to_image,
-                     inputs=[height_slider, width_slider, steps_slider, scales_slider, prompt_text, seed_number],
-                     outputs=image_output
-                 )
-
-             with gr.Tab("🎯 Image to 3D", elem_classes="tab"):
-                 create_3d_ui("wkl")
-
-     demo.queue().launch(share=True)
-
- if __name__ == '__main__':
-     fire.Fire(launch)