developer0hye committed on
Commit 5e746b8 · verified · 1 Parent(s): fbbbccb

Update app.py

Files changed (1):
  app.py +69 -139
app.py CHANGED
@@ -1,154 +1,87 @@
 import gradio as gr
 import spaces
 import torch
-import math
-import numpy as np
 import os
-from PIL import Image
-import torchvision.transforms as T
-from torchvision.transforms.functional import InterpolationMode
-from transformers import AutoModel, AutoTokenizer, AutoConfig
-
-# =============================================================================
-# InternVL-3 preprocessing utilities (image-only version)
-# =============================================================================
-IMAGENET_MEAN = (0.485, 0.456, 0.406)
-IMAGENET_STD = (0.229, 0.224, 0.225)
-
-
-def build_transform(input_size: int = 448):
-    """Return torchvision transform matching InternVL pre-training."""
-    return T.Compose(
-        [
-            T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
-            T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
-            T.ToTensor(),
-            T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
-        ]
-    )
-
-
-def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
-    best_ratio_diff = float("inf")
-    best_ratio = (1, 1)
-    area = width * height
-    for ratio in target_ratios:
-        tgt_ar = ratio[0] / ratio[1]
-        diff = abs(aspect_ratio - tgt_ar)
-        if diff < best_ratio_diff or (diff == best_ratio_diff and area > 0.5 * image_size * image_size * ratio[0] * ratio[1]):
-            best_ratio_diff = diff
-            best_ratio = ratio
-    return best_ratio
-
-
-def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
-    """Split arbitrarily-sized image into ≤12 tiles sized 448×448 (InternVL spec)."""
-    ow, oh = image.size
-    aspect_ratio = ow / oh
-    target_ratios = sorted(
-        {(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if min_num <= i * j <= max_num},
-        key=lambda x: x[0] * x[1],
-    )
-    ratio = find_closest_aspect_ratio(aspect_ratio, target_ratios, ow, oh, image_size)
-    tw, th = image_size * ratio[0], image_size * ratio[1]
-    blocks = ratio[0] * ratio[1]
-    resized = image.resize((tw, th))
-    tiles = [
-        resized.crop(
-            (
-                (idx % (tw // image_size)) * image_size,
-                (idx // (tw // image_size)) * image_size,
-                ((idx % (tw // image_size)) + 1) * image_size,
-                ((idx // (tw // image_size)) + 1) * image_size,
-            )
-        )
-        for idx in range(blocks)
-    ]
-    if use_thumbnail and blocks != 1:
-        tiles.append(image.resize((image_size, image_size)))
-    return tiles
-
-
-def load_image(path: str, input_size: int = 448, max_num: int = 12):
-    """Return tensor of shape (N, 3, H, W) ready for InternVL."""
-    img = Image.open(path).convert("RGB")
-    transform = build_transform(input_size)
-    tiles = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
-    return torch.stack([transform(t) for t in tiles])


 # =============================================================================
-# InternVL-3-8B model loading (multi-GPU aware)
 # =============================================================================
-MODEL_ID = "OpenGVLab/InternVL3-8B"
-
-
-def split_model(model_name: str):
-    """Distribute LLM layers across GPUs, keeping vision encoder on GPU 0."""
-    n_gpu = torch.cuda.device_count()
-    if n_gpu < 2:
-        return "auto"  # let transformers decide
-
-    cfg = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
-    n_layers = cfg.llm_config.num_hidden_layers  # type: ignore[attr-defined]
-
-    # GPU0 does vision + some text layers => treat as 0.5 GPU
-    per_gpu = math.ceil(n_layers / (n_gpu - 0.5))
-    alloc = [per_gpu] * n_gpu
-    alloc[0] = math.ceil(alloc[0] * 0.5)
-
-    dmap = {
-        "vision_model": 0,
-        "mlp1": 0,
-        "language_model.model.tok_embeddings": 0,
-        "language_model.model.embed_tokens": 0,
-        "language_model.output": 0,
-        "language_model.model.norm": 0,
-        "language_model.model.rotary_emb": 0,
-        "language_model.lm_head": 0,
-    }
-    layer_idx = 0
-    for gpu, n in enumerate(alloc):
-        for _ in range(n):
-            if layer_idx >= n_layers:
-                break
-            dmap[f"language_model.model.layers.{layer_idx}"] = 0 if layer_idx == n_layers - 1 else gpu
-            layer_idx += 1
-    return dmap
-
-
-device_map = split_model(MODEL_ID)
-
-model = AutoModel.from_pretrained(
     MODEL_ID,
-    torch_dtype=torch.bfloat16,
-    low_cpu_mem_usage=True,
-    use_flash_attn=True,
-    trust_remote_code=True,
-    device_map=device_map,
-).eval()

-tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, use_fast=False)


 # =============================================================================
-# Inference function (image-only)
 # =============================================================================
 @spaces.GPU
-def internvl_inference(image_path: str | None, text_input: str | None = None):
     if image_path is None:
         return "Please upload an image first."
-    pixel_values = load_image(image_path, max_num=12).to(torch.bfloat16).cuda()
-    prompt = f"<image>\n{text_input}" if text_input else "<image>\n"
-    gen_cfg = dict(max_new_tokens=1024, do_sample=True)
-    return model.chat(tokenizer, pixel_values, prompt, gen_cfg)


 # =============================================================================
-# Gradio UI (image-only, Gradio 5 compatible)
 # =============================================================================
 DESCRIPTION = (
-    "[InternVL 3-8B demo](https://huggingface.co/OpenGVLab/InternVL3-8B) — "
     "upload an image and ask anything about it."
 )

@@ -164,26 +97,23 @@ with gr.Blocks(css=css, theme="origin") as demo:
     gr.Markdown(DESCRIPTION)

     with gr.Row():
-        # Left column: image, question, submit button (stacked vertically)
         with gr.Column(scale=1):
             input_image = gr.Image(label="Upload Image", type="filepath")
             text_input = gr.Textbox(label="Question")
             submit_btn = gr.Button("Submit")
-        # Right column: model output
         with gr.Column(scale=1):
             output_text = gr.Textbox(label="Model Output", elem_id="output_text")
-
-    # 🔽 Add examples
     gr.Examples(
-        examples=[["example.webp", "explain this image"]],
         inputs=[input_image, text_input],
         outputs=output_text,
-        fn=internvl_inference,  # specify so the example runs immediately on click
-        cache_examples=True,    # cache results (optional)
-        label="Try an example"  # display label (optional)
     )
-
-    submit_btn.click(internvl_inference, [input_image, text_input], [output_text])

 if __name__ == "__main__":
     demo.launch()
 
@@ -1,154 +1,87 @@
 import gradio as gr
 import spaces
 import torch
 import os

+from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
+from qwen_vl_utils import process_vision_info  # pip install qwen-vl-utils[decord]==0.0.8

 # =============================================================================
+# Qwen2.5-VL-7B-Instruct: model & processor
 # =============================================================================
+MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"
+
+# Recommended: use flash-attn2 (uncomment depending on your environment)
+# model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+#     MODEL_ID,
+#     torch_dtype=torch.bfloat16,
+#     attn_implementation="flash_attention_2",
+#     device_map="auto",
+# )
+
+# Default load
+model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
+    torch_dtype="auto",
+    device_map="auto",
+)
+model.eval()

+# Automatic resolution scaling (defaults). Use min_pixels/max_pixels to control token cost if needed.
+processor = AutoProcessor.from_pretrained(MODEL_ID)
+# e.g. min_pixels = 256*28*28; max_pixels = 1280*28*28
+# processor = AutoProcessor.from_pretrained(MODEL_ID, min_pixels=min_pixels, max_pixels=max_pixels)


 # =============================================================================
+# Inference (image-only UI, text is optional)
 # =============================================================================
 @spaces.GPU
+def qwen_vl_inference(image_path: str | None, text_input: str | None = None):
     if image_path is None:
         return "Please upload an image first."
+
+    # Qwen's official examples pass local file paths as file:// URIs
+    file_uri = f"file://{os.path.abspath(image_path)}"
+    user_text = text_input.strip() if text_input else "Describe this image."
+
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "image", "image": file_uri},
+                {"type": "text", "text": user_text},
+            ],
+        }
+    ]
+
+    # Text/vision preprocessing
+    chat_text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    image_inputs, video_inputs = process_vision_info(messages)
+    inputs = processor(
+        text=[chat_text],
+        images=image_inputs,
+        videos=video_inputs,
+        padding=True,
+        return_tensors="pt",
+    )
+
+    # Move tensors to the model device (safe even with device_map="auto")
+    inputs = {k: (v.to(model.device) if isinstance(v, torch.Tensor) else v) for k, v in inputs.items()}
+
+    # Generate
+    gen_ids = model.generate(**inputs, max_new_tokens=512)
+    # Strip the prompt tokens, then decode
+    trimmed = [out[len(inp):] for inp, out in zip(inputs["input_ids"], gen_ids)]
+    output = processor.batch_decode(trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    return output


 # =============================================================================
+# Gradio UI (Gradio 5)
 # =============================================================================
 DESCRIPTION = (
+    "[Qwen2.5-VL-7B-Instruct demo](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) — "
     "upload an image and ask anything about it."
 )

@@ -164,26 +97,23 @@ with gr.Blocks(css=css, theme="origin") as demo:
     gr.Markdown(DESCRIPTION)

     with gr.Row():
         with gr.Column(scale=1):
             input_image = gr.Image(label="Upload Image", type="filepath")
             text_input = gr.Textbox(label="Question")
             submit_btn = gr.Button("Submit")
         with gr.Column(scale=1):
             output_text = gr.Textbox(label="Model Output", elem_id="output_text")
+
     gr.Examples(
+        examples=[["example.webp", "Explain this image"]],
         inputs=[input_image, text_input],
         outputs=output_text,
+        fn=qwen_vl_inference,
+        cache_examples=True,
+        label="Try an example"
     )
+
+    submit_btn.click(qwen_vl_inference, [input_image, text_input], [output_text])

 if __name__ == "__main__":
     demo.launch()
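
The new app.py leaves the flash-attn2 load path commented out. A minimal sketch, not part of the commit, of enabling it only when the optional flash_attn package is actually importable and otherwise falling back to the default attention backend:

import importlib.util

import torch
from transformers import Qwen2_5_VLForConditionalGeneration

MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"

# Use flash-attention 2 only if the flash_attn package is installed;
# otherwise let transformers pick its default attention implementation.
load_kwargs = dict(torch_dtype=torch.bfloat16, device_map="auto")
if importlib.util.find_spec("flash_attn") is not None:
    load_kwargs["attn_implementation"] = "flash_attention_2"

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(MODEL_ID, **load_kwargs).eval()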
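
The processor comment in the new code suggests min_pixels/max_pixels for controlling visual token cost. A minimal sketch using the bounds from the commented-out example line; each visual token covers a 28×28-pixel patch, so this caps an image at roughly 256 to 1280 visual tokens:

from transformers import AutoProcessor

MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"

# Bound the dynamic-resolution resizer: about 256..1280 visual tokens per image.
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28

processor = AutoProcessor.from_pretrained(
    MODEL_ID,
    min_pixels=min_pixels,
    max_pixels=max_pixels,
)

Lowering max_pixels shortens the prompt and reduces GPU memory use at the cost of fine detail on large images.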
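
qwen_vl_inference passes the uploaded file as a file:// URI. For reference, the same messages format in the Qwen2.5-VL usage examples also accepts http(s) URLs and base64 data URIs; a short sketch with placeholder values:

# Alternative image sources for the chat messages (placeholder URL and path;
# the commented variants mirror the Qwen2.5-VL usage examples).
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://example.com/demo.jpeg"},
            # {"type": "image", "image": "file:///absolute/path/to/image.webp"},
            # {"type": "image", "image": "data:image;base64,<base64-encoded bytes>"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]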