osmanio2 commited on
Commit
ae7953e
·
verified ·
1 Parent(s): 848395f

training vision only on describe task and describe grid

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. added_tokens.json +25 -0
  2. checkpoint-4800/added_tokens.json +25 -0
  3. checkpoint-4800/config.json +55 -0
  4. checkpoint-4800/configuration_minicpm.py +100 -0
  5. checkpoint-4800/generation_config.json +6 -0
  6. checkpoint-4800/latest +1 -0
  7. checkpoint-4800/merges.txt +0 -0
  8. checkpoint-4800/model-00001-of-00004.safetensors +3 -0
  9. checkpoint-4800/model-00002-of-00004.safetensors +3 -0
  10. checkpoint-4800/model-00003-of-00004.safetensors +3 -0
  11. checkpoint-4800/model-00004-of-00004.safetensors +3 -0
  12. checkpoint-4800/model.safetensors.index.json +796 -0
  13. checkpoint-4800/modeling_minicpmv.py +403 -0
  14. checkpoint-4800/modeling_navit_siglip.py +937 -0
  15. checkpoint-4800/resampler.py +782 -0
  16. checkpoint-4800/rng_state_0.pth +3 -0
  17. checkpoint-4800/rng_state_1.pth +3 -0
  18. checkpoint-4800/rng_state_2.pth +3 -0
  19. checkpoint-4800/rng_state_3.pth +3 -0
  20. checkpoint-4800/rng_state_4.pth +3 -0
  21. checkpoint-4800/rng_state_5.pth +3 -0
  22. checkpoint-4800/rng_state_6.pth +3 -0
  23. checkpoint-4800/rng_state_7.pth +3 -0
  24. checkpoint-4800/special_tokens_map.json +52 -0
  25. checkpoint-4800/tokenization_minicpmv_fast.py +66 -0
  26. checkpoint-4800/tokenizer.json +0 -0
  27. checkpoint-4800/tokenizer_config.json +235 -0
  28. checkpoint-4800/trainer_state.json +0 -0
  29. checkpoint-4800/training_args.bin +3 -0
  30. checkpoint-4800/vocab.json +0 -0
  31. checkpoint-4800/zero_to_fp32.py +604 -0
  32. checkpoint-5000/added_tokens.json +25 -0
  33. checkpoint-5000/config.json +55 -0
  34. checkpoint-5000/configuration_minicpm.py +100 -0
  35. checkpoint-5000/generation_config.json +6 -0
  36. checkpoint-5000/latest +1 -0
  37. checkpoint-5000/merges.txt +0 -0
  38. checkpoint-5000/model-00001-of-00004.safetensors +3 -0
  39. checkpoint-5000/model-00002-of-00004.safetensors +3 -0
  40. checkpoint-5000/model-00003-of-00004.safetensors +3 -0
  41. checkpoint-5000/model-00004-of-00004.safetensors +3 -0
  42. checkpoint-5000/model.safetensors.index.json +796 -0
  43. checkpoint-5000/modeling_minicpmv.py +403 -0
  44. checkpoint-5000/modeling_navit_siglip.py +937 -0
  45. checkpoint-5000/resampler.py +782 -0
  46. checkpoint-5000/rng_state_0.pth +3 -0
  47. checkpoint-5000/rng_state_1.pth +3 -0
  48. checkpoint-5000/rng_state_2.pth +3 -0
  49. checkpoint-5000/rng_state_3.pth +3 -0
  50. checkpoint-5000/rng_state_4.pth +3 -0
added_tokens.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</box>": 151651,
3
+ "</image>": 151647,
4
+ "</image_id>": 151659,
5
+ "</point>": 151655,
6
+ "</quad>": 151653,
7
+ "</ref>": 151649,
8
+ "</slice>": 151657,
9
+ "<box>": 151650,
10
+ "<image>": 151646,
11
+ "<image_id>": 151658,
12
+ "<point>": 151654,
13
+ "<quad>": 151652,
14
+ "<ref>": 151648,
15
+ "<slice>": 151656,
16
+ "<|endoftext|>": 151643,
17
+ "<|im_end|>": 151645,
18
+ "<|im_start|>": 151644,
19
+ "<|reserved_special_token_0|>": 151660,
20
+ "<|reserved_special_token_1|>": 151661,
21
+ "<|reserved_special_token_2|>": 151662,
22
+ "<|reserved_special_token_3|>": 151663,
23
+ "<|reserved_special_token_4|>": 151664,
24
+ "<|reserved_special_token_5|>": 151665
25
+ }
checkpoint-4800/added_tokens.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</box>": 151651,
3
+ "</image>": 151647,
4
+ "</image_id>": 151659,
5
+ "</point>": 151655,
6
+ "</quad>": 151653,
7
+ "</ref>": 151649,
8
+ "</slice>": 151657,
9
+ "<box>": 151650,
10
+ "<image>": 151646,
11
+ "<image_id>": 151658,
12
+ "<point>": 151654,
13
+ "<quad>": 151652,
14
+ "<ref>": 151648,
15
+ "<slice>": 151656,
16
+ "<|endoftext|>": 151643,
17
+ "<|im_end|>": 151645,
18
+ "<|im_start|>": 151644,
19
+ "<|reserved_special_token_0|>": 151660,
20
+ "<|reserved_special_token_1|>": 151661,
21
+ "<|reserved_special_token_2|>": 151662,
22
+ "<|reserved_special_token_3|>": 151663,
23
+ "<|reserved_special_token_4|>": 151664,
24
+ "<|reserved_special_token_5|>": 151665
25
+ }
checkpoint-4800/config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/ephemeral/models/new-dsl-qwen-describe-grid",
3
+ "architectures": [
4
+ "MiniCPMV"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_minicpm.MiniCPMVConfig",
9
+ "AutoModel": "modeling_minicpmv.MiniCPMV",
10
+ "AutoModelForCausalLM": "openbmb/MiniCPM-V-2_6--modeling_minicpmv.MiniCPMV"
11
+ },
12
+ "batch_vision_input": true,
13
+ "bos_token_id": 151643,
14
+ "drop_vision_last_layer": false,
15
+ "eos_token_id": 151645,
16
+ "hidden_act": "silu",
17
+ "hidden_size": 3584,
18
+ "image_size": 448,
19
+ "initializer_range": 0.02,
20
+ "intermediate_size": 18944,
21
+ "max_position_embeddings": 32768,
22
+ "max_window_layers": 28,
23
+ "model_type": "minicpmv",
24
+ "num_attention_heads": 28,
25
+ "num_hidden_layers": 28,
26
+ "num_key_value_heads": 4,
27
+ "patch_size": 14,
28
+ "query_num": 64,
29
+ "rms_norm_eps": 1e-06,
30
+ "rope_theta": 1000000.0,
31
+ "slice_config": {
32
+ "max_slice_nums": 9,
33
+ "model_type": "minicpmv"
34
+ },
35
+ "slice_mode": true,
36
+ "sliding_window": 131072,
37
+ "tie_word_embeddings": false,
38
+ "torch_dtype": "bfloat16",
39
+ "transformers_version": "4.40.0",
40
+ "use_cache": true,
41
+ "use_image_id": true,
42
+ "use_sliding_window": false,
43
+ "version": 2.6,
44
+ "vision_batch_size": 16,
45
+ "vision_config": {
46
+ "hidden_size": 1152,
47
+ "image_size": 980,
48
+ "intermediate_size": 4304,
49
+ "model_type": "siglip_vision_model",
50
+ "num_attention_heads": 16,
51
+ "num_hidden_layers": 27,
52
+ "patch_size": 14
53
+ },
54
+ "vocab_size": 151666
55
+ }
checkpoint-4800/configuration_minicpm.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ """ MiniCPMV model configuration"""
3
+
4
+ import os
5
+ from typing import Union
6
+
7
+ from transformers.utils import logging
8
+ from transformers import Qwen2Config, PretrainedConfig
9
+ from .modeling_navit_siglip import SiglipVisionConfig
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
+ class MiniCPMVSliceConfig(PretrainedConfig):
15
+ model_type = "minicpmv"
16
+
17
+ def __init__(
18
+ self,
19
+ patch_size=14,
20
+ max_slice_nums=9,
21
+ scale_resolution=448,
22
+ **kwargs,
23
+ ):
24
+ super().__init__(**kwargs)
25
+ self.patch_size = patch_size
26
+ self.max_slice_nums = max_slice_nums
27
+ self.scale_resolution = scale_resolution
28
+
29
+ @classmethod
30
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
31
+ cls._set_token_in_kwargs(kwargs)
32
+
33
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
34
+
35
+ if config_dict.get("model_type") == "minicpmv":
36
+ config_dict = config_dict["slice_config"]
37
+
38
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
39
+ logger.warning(
40
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
41
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
42
+ )
43
+
44
+ return cls.from_dict(config_dict, **kwargs)
45
+
46
+
47
+
48
+ class MiniCPMVConfig(Qwen2Config):
49
+ model_type = "minicpmv"
50
+ keys_to_ignore_at_inference = ["past_key_values"]
51
+
52
+ default_vision_config = {
53
+ "hidden_size": 1152,
54
+ "image_size": 980,
55
+ "intermediate_size": 4304,
56
+ "model_type": "siglip",
57
+ "num_attention_heads": 16,
58
+ "num_hidden_layers": 27,
59
+ "patch_size": 14,
60
+ }
61
+
62
+ def __init__(
63
+ self,
64
+ use_cache=True,
65
+ query_num=64,
66
+ image_size=448,
67
+ drop_vision_last_layer=True,
68
+ batch_vision_input=True,
69
+ slice_config=None,
70
+ vision_config=None,
71
+ use_image_id=True,
72
+ vision_batch_size=16,
73
+ **kwargs,
74
+ ):
75
+ self.use_cache = use_cache
76
+ self.query_num = query_num
77
+ self.image_size = image_size
78
+ self.drop_vision_last_layer = drop_vision_last_layer
79
+ self.batch_vision_input = batch_vision_input
80
+ self.use_image_id = use_image_id
81
+ self.vision_batch_size = vision_batch_size
82
+
83
+ if slice_config is None:
84
+ self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
85
+ else:
86
+ self.slice_config = MiniCPMVSliceConfig(**slice_config)
87
+ self.slice_mode = True
88
+
89
+ # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit add tgt_sizes
90
+ if vision_config is None:
91
+ self.vision_config = SiglipVisionConfig(**self.default_vision_config)
92
+ logger.info("vision_config is None, using default vision config")
93
+ elif isinstance(vision_config, dict):
94
+ self.vision_config = SiglipVisionConfig(**vision_config)
95
+ elif isinstance(vision_config, SiglipVisionConfig):
96
+ self.vision_config = vision_config
97
+
98
+ self.patch_size = self.vision_config.patch_size
99
+
100
+ super().__init__(**kwargs)
checkpoint-4800/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 151643,
4
+ "eos_token_id": 151645,
5
+ "transformers_version": "4.40.0"
6
+ }
checkpoint-4800/latest ADDED
@@ -0,0 +1 @@
 
 
1
+ global_step4800
checkpoint-4800/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4800/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:327b5e63ac7729d00b0b17233a0d648a623fc080cc1069dce9cb79478cdaf2b5
3
+ size 4874808328
checkpoint-4800/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6d7af1f12ba8c72fec16ba3f3951df5643bef24de8b0af5baec67290c07ca4f
3
+ size 4932751496
checkpoint-4800/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9ba1332c74a92b9c8af5fbc2c2da1baa8cd5ed4fd8056b98a586ff4bc3e54cf
3
+ size 4330865648
checkpoint-4800/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da6e59cb3904ffff8c8728d42e968f5daf3991812238b2ff2a833da67b8511cb
3
+ size 2060017080
checkpoint-4800/model.safetensors.index.json ADDED
@@ -0,0 +1,796 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 16198350304
4
+ },
5
+ "weight_map": {
6
+ "llm.lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "llm.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
+ "llm.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
+ "llm.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
+ "llm.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
+ "llm.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
+ "llm.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
+ "llm.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
14
+ "llm.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "llm.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "llm.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
17
+ "llm.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
18
+ "llm.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
19
+ "llm.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
20
+ "llm.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
21
+ "llm.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
22
+ "llm.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
23
+ "llm.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
24
+ "llm.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
25
+ "llm.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
26
+ "llm.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
27
+ "llm.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
28
+ "llm.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
29
+ "llm.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
30
+ "llm.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
31
+ "llm.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
32
+ "llm.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
33
+ "llm.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
34
+ "llm.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
35
+ "llm.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
36
+ "llm.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
37
+ "llm.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
38
+ "llm.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
39
+ "llm.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
40
+ "llm.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
41
+ "llm.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
42
+ "llm.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
43
+ "llm.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
44
+ "llm.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
45
+ "llm.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
46
+ "llm.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
47
+ "llm.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
48
+ "llm.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
49
+ "llm.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
50
+ "llm.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
51
+ "llm.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
52
+ "llm.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
53
+ "llm.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
54
+ "llm.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
55
+ "llm.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
56
+ "llm.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
57
+ "llm.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
58
+ "llm.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
59
+ "llm.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
60
+ "llm.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
61
+ "llm.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
62
+ "llm.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
63
+ "llm.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
64
+ "llm.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
65
+ "llm.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
66
+ "llm.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
67
+ "llm.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
68
+ "llm.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
69
+ "llm.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
70
+ "llm.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
71
+ "llm.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
72
+ "llm.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "llm.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
74
+ "llm.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
75
+ "llm.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
76
+ "llm.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
77
+ "llm.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
78
+ "llm.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
79
+ "llm.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
80
+ "llm.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
81
+ "llm.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
82
+ "llm.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
83
+ "llm.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
84
+ "llm.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
85
+ "llm.model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
86
+ "llm.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
87
+ "llm.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "llm.model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
89
+ "llm.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
90
+ "llm.model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
91
+ "llm.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
92
+ "llm.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
93
+ "llm.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
94
+ "llm.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
95
+ "llm.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
96
+ "llm.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
97
+ "llm.model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
98
+ "llm.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
99
+ "llm.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
100
+ "llm.model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
101
+ "llm.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
102
+ "llm.model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
103
+ "llm.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
104
+ "llm.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
105
+ "llm.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
106
+ "llm.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
107
+ "llm.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
108
+ "llm.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
109
+ "llm.model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
110
+ "llm.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
111
+ "llm.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
112
+ "llm.model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
113
+ "llm.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
114
+ "llm.model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
115
+ "llm.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
116
+ "llm.model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
117
+ "llm.model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
118
+ "llm.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
119
+ "llm.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
120
+ "llm.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
121
+ "llm.model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
122
+ "llm.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
123
+ "llm.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
124
+ "llm.model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
125
+ "llm.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
126
+ "llm.model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
127
+ "llm.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
128
+ "llm.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
129
+ "llm.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
130
+ "llm.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
131
+ "llm.model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
132
+ "llm.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
133
+ "llm.model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
134
+ "llm.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
135
+ "llm.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
136
+ "llm.model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
137
+ "llm.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
138
+ "llm.model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
139
+ "llm.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
140
+ "llm.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
141
+ "llm.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
142
+ "llm.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
143
+ "llm.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
144
+ "llm.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
145
+ "llm.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
146
+ "llm.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
147
+ "llm.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
148
+ "llm.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
149
+ "llm.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
150
+ "llm.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
151
+ "llm.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
152
+ "llm.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
153
+ "llm.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
154
+ "llm.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
155
+ "llm.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
156
+ "llm.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
157
+ "llm.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
158
+ "llm.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
159
+ "llm.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
160
+ "llm.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
161
+ "llm.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
162
+ "llm.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
163
+ "llm.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
164
+ "llm.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
165
+ "llm.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
166
+ "llm.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
167
+ "llm.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
168
+ "llm.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
169
+ "llm.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
170
+ "llm.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
171
+ "llm.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
172
+ "llm.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
173
+ "llm.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
174
+ "llm.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
175
+ "llm.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
176
+ "llm.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
177
+ "llm.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
178
+ "llm.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
179
+ "llm.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
180
+ "llm.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
181
+ "llm.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
182
+ "llm.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
183
+ "llm.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
184
+ "llm.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
185
+ "llm.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
186
+ "llm.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
187
+ "llm.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
188
+ "llm.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
189
+ "llm.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
190
+ "llm.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
191
+ "llm.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
192
+ "llm.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
193
+ "llm.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
194
+ "llm.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
195
+ "llm.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
196
+ "llm.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
197
+ "llm.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
198
+ "llm.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
199
+ "llm.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
200
+ "llm.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
201
+ "llm.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
202
+ "llm.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
203
+ "llm.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
204
+ "llm.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
205
+ "llm.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
206
+ "llm.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
207
+ "llm.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
208
+ "llm.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
209
+ "llm.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
210
+ "llm.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
211
+ "llm.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
212
+ "llm.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
213
+ "llm.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
214
+ "llm.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
215
+ "llm.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
216
+ "llm.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
217
+ "llm.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
218
+ "llm.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
219
+ "llm.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
220
+ "llm.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
221
+ "llm.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
222
+ "llm.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
223
+ "llm.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
224
+ "llm.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
225
+ "llm.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
226
+ "llm.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
227
+ "llm.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
228
+ "llm.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
229
+ "llm.model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
230
+ "llm.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
231
+ "llm.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
232
+ "llm.model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
233
+ "llm.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
234
+ "llm.model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
235
+ "llm.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
236
+ "llm.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
237
+ "llm.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
238
+ "llm.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
239
+ "llm.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
240
+ "llm.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
241
+ "llm.model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
242
+ "llm.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
243
+ "llm.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
244
+ "llm.model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
245
+ "llm.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
246
+ "llm.model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
247
+ "llm.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
248
+ "llm.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
249
+ "llm.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
250
+ "llm.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
251
+ "llm.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
252
+ "llm.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
253
+ "llm.model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
254
+ "llm.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
255
+ "llm.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
256
+ "llm.model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
257
+ "llm.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
258
+ "llm.model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
259
+ "llm.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
260
+ "llm.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
261
+ "llm.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
262
+ "llm.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
263
+ "llm.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
264
+ "llm.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
265
+ "llm.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
266
+ "llm.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
267
+ "llm.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
268
+ "llm.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
269
+ "llm.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
270
+ "llm.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
271
+ "llm.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
272
+ "llm.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
273
+ "llm.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
274
+ "llm.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
275
+ "llm.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
276
+ "llm.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
277
+ "llm.model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
278
+ "llm.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
279
+ "llm.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
280
+ "llm.model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
281
+ "llm.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
282
+ "llm.model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
283
+ "llm.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
284
+ "llm.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
285
+ "llm.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
286
+ "llm.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
287
+ "llm.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
288
+ "llm.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
289
+ "llm.model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
290
+ "llm.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
291
+ "llm.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
292
+ "llm.model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
293
+ "llm.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
294
+ "llm.model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
295
+ "llm.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
296
+ "llm.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
297
+ "llm.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
298
+ "llm.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
299
+ "llm.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
300
+ "llm.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
301
+ "llm.model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
302
+ "llm.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
303
+ "llm.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
304
+ "llm.model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
305
+ "llm.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
306
+ "llm.model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
307
+ "llm.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
308
+ "llm.model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
309
+ "llm.model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
310
+ "llm.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
311
+ "llm.model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
312
+ "llm.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
313
+ "llm.model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
314
+ "llm.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
315
+ "llm.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
316
+ "llm.model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
317
+ "llm.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
318
+ "llm.model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
319
+ "llm.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
320
+ "llm.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
321
+ "llm.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
322
+ "llm.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
323
+ "llm.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
324
+ "llm.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
325
+ "llm.model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
326
+ "llm.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
327
+ "llm.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
328
+ "llm.model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
329
+ "llm.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
330
+ "llm.model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
331
+ "llm.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
332
+ "llm.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
333
+ "llm.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
334
+ "llm.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
335
+ "llm.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
336
+ "llm.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
337
+ "llm.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
338
+ "llm.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
339
+ "llm.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
340
+ "llm.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
341
+ "llm.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
342
+ "llm.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
343
+ "llm.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
344
+ "llm.model.norm.weight": "model-00003-of-00004.safetensors",
345
+ "resampler.attn.in_proj_bias": "model-00004-of-00004.safetensors",
346
+ "resampler.attn.in_proj_weight": "model-00004-of-00004.safetensors",
347
+ "resampler.attn.out_proj.bias": "model-00004-of-00004.safetensors",
348
+ "resampler.attn.out_proj.weight": "model-00004-of-00004.safetensors",
349
+ "resampler.kv_proj.weight": "model-00004-of-00004.safetensors",
350
+ "resampler.ln_kv.bias": "model-00004-of-00004.safetensors",
351
+ "resampler.ln_kv.weight": "model-00004-of-00004.safetensors",
352
+ "resampler.ln_post.bias": "model-00004-of-00004.safetensors",
353
+ "resampler.ln_post.weight": "model-00004-of-00004.safetensors",
354
+ "resampler.ln_q.bias": "model-00004-of-00004.safetensors",
355
+ "resampler.ln_q.weight": "model-00004-of-00004.safetensors",
356
+ "resampler.proj": "model-00004-of-00004.safetensors",
357
+ "resampler.query": "model-00004-of-00004.safetensors",
358
+ "vpm.embeddings.patch_embedding.bias": "model-00004-of-00004.safetensors",
359
+ "vpm.embeddings.patch_embedding.weight": "model-00004-of-00004.safetensors",
360
+ "vpm.embeddings.position_embedding.weight": "model-00004-of-00004.safetensors",
361
+ "vpm.encoder.layers.0.layer_norm1.bias": "model-00004-of-00004.safetensors",
362
+ "vpm.encoder.layers.0.layer_norm1.weight": "model-00004-of-00004.safetensors",
363
+ "vpm.encoder.layers.0.layer_norm2.bias": "model-00004-of-00004.safetensors",
364
+ "vpm.encoder.layers.0.layer_norm2.weight": "model-00004-of-00004.safetensors",
365
+ "vpm.encoder.layers.0.mlp.fc1.bias": "model-00004-of-00004.safetensors",
366
+ "vpm.encoder.layers.0.mlp.fc1.weight": "model-00004-of-00004.safetensors",
367
+ "vpm.encoder.layers.0.mlp.fc2.bias": "model-00004-of-00004.safetensors",
368
+ "vpm.encoder.layers.0.mlp.fc2.weight": "model-00004-of-00004.safetensors",
369
+ "vpm.encoder.layers.0.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
370
+ "vpm.encoder.layers.0.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
371
+ "vpm.encoder.layers.0.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
372
+ "vpm.encoder.layers.0.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
373
+ "vpm.encoder.layers.0.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
374
+ "vpm.encoder.layers.0.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
375
+ "vpm.encoder.layers.0.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
376
+ "vpm.encoder.layers.0.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
377
+ "vpm.encoder.layers.1.layer_norm1.bias": "model-00004-of-00004.safetensors",
378
+ "vpm.encoder.layers.1.layer_norm1.weight": "model-00004-of-00004.safetensors",
379
+ "vpm.encoder.layers.1.layer_norm2.bias": "model-00004-of-00004.safetensors",
380
+ "vpm.encoder.layers.1.layer_norm2.weight": "model-00004-of-00004.safetensors",
381
+ "vpm.encoder.layers.1.mlp.fc1.bias": "model-00004-of-00004.safetensors",
382
+ "vpm.encoder.layers.1.mlp.fc1.weight": "model-00004-of-00004.safetensors",
383
+ "vpm.encoder.layers.1.mlp.fc2.bias": "model-00004-of-00004.safetensors",
384
+ "vpm.encoder.layers.1.mlp.fc2.weight": "model-00004-of-00004.safetensors",
385
+ "vpm.encoder.layers.1.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
386
+ "vpm.encoder.layers.1.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
387
+ "vpm.encoder.layers.1.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
388
+ "vpm.encoder.layers.1.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
389
+ "vpm.encoder.layers.1.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
390
+ "vpm.encoder.layers.1.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
391
+ "vpm.encoder.layers.1.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
392
+ "vpm.encoder.layers.1.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
393
+ "vpm.encoder.layers.10.layer_norm1.bias": "model-00004-of-00004.safetensors",
394
+ "vpm.encoder.layers.10.layer_norm1.weight": "model-00004-of-00004.safetensors",
395
+ "vpm.encoder.layers.10.layer_norm2.bias": "model-00004-of-00004.safetensors",
396
+ "vpm.encoder.layers.10.layer_norm2.weight": "model-00004-of-00004.safetensors",
397
+ "vpm.encoder.layers.10.mlp.fc1.bias": "model-00004-of-00004.safetensors",
398
+ "vpm.encoder.layers.10.mlp.fc1.weight": "model-00004-of-00004.safetensors",
399
+ "vpm.encoder.layers.10.mlp.fc2.bias": "model-00004-of-00004.safetensors",
400
+ "vpm.encoder.layers.10.mlp.fc2.weight": "model-00004-of-00004.safetensors",
401
+ "vpm.encoder.layers.10.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
402
+ "vpm.encoder.layers.10.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
403
+ "vpm.encoder.layers.10.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
404
+ "vpm.encoder.layers.10.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
405
+ "vpm.encoder.layers.10.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
406
+ "vpm.encoder.layers.10.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
407
+ "vpm.encoder.layers.10.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
408
+ "vpm.encoder.layers.10.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
409
+ "vpm.encoder.layers.11.layer_norm1.bias": "model-00004-of-00004.safetensors",
410
+ "vpm.encoder.layers.11.layer_norm1.weight": "model-00004-of-00004.safetensors",
411
+ "vpm.encoder.layers.11.layer_norm2.bias": "model-00004-of-00004.safetensors",
412
+ "vpm.encoder.layers.11.layer_norm2.weight": "model-00004-of-00004.safetensors",
413
+ "vpm.encoder.layers.11.mlp.fc1.bias": "model-00004-of-00004.safetensors",
414
+ "vpm.encoder.layers.11.mlp.fc1.weight": "model-00004-of-00004.safetensors",
415
+ "vpm.encoder.layers.11.mlp.fc2.bias": "model-00004-of-00004.safetensors",
416
+ "vpm.encoder.layers.11.mlp.fc2.weight": "model-00004-of-00004.safetensors",
417
+ "vpm.encoder.layers.11.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
418
+ "vpm.encoder.layers.11.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
419
+ "vpm.encoder.layers.11.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
420
+ "vpm.encoder.layers.11.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
421
+ "vpm.encoder.layers.11.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
422
+ "vpm.encoder.layers.11.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
423
+ "vpm.encoder.layers.11.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
424
+ "vpm.encoder.layers.11.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
425
+ "vpm.encoder.layers.12.layer_norm1.bias": "model-00004-of-00004.safetensors",
426
+ "vpm.encoder.layers.12.layer_norm1.weight": "model-00004-of-00004.safetensors",
427
+ "vpm.encoder.layers.12.layer_norm2.bias": "model-00004-of-00004.safetensors",
428
+ "vpm.encoder.layers.12.layer_norm2.weight": "model-00004-of-00004.safetensors",
429
+ "vpm.encoder.layers.12.mlp.fc1.bias": "model-00004-of-00004.safetensors",
430
+ "vpm.encoder.layers.12.mlp.fc1.weight": "model-00004-of-00004.safetensors",
431
+ "vpm.encoder.layers.12.mlp.fc2.bias": "model-00004-of-00004.safetensors",
432
+ "vpm.encoder.layers.12.mlp.fc2.weight": "model-00004-of-00004.safetensors",
433
+ "vpm.encoder.layers.12.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
434
+ "vpm.encoder.layers.12.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
435
+ "vpm.encoder.layers.12.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
436
+ "vpm.encoder.layers.12.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
437
+ "vpm.encoder.layers.12.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
438
+ "vpm.encoder.layers.12.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
439
+ "vpm.encoder.layers.12.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
440
+ "vpm.encoder.layers.12.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
441
+ "vpm.encoder.layers.13.layer_norm1.bias": "model-00004-of-00004.safetensors",
442
+ "vpm.encoder.layers.13.layer_norm1.weight": "model-00004-of-00004.safetensors",
443
+ "vpm.encoder.layers.13.layer_norm2.bias": "model-00004-of-00004.safetensors",
444
+ "vpm.encoder.layers.13.layer_norm2.weight": "model-00004-of-00004.safetensors",
445
+ "vpm.encoder.layers.13.mlp.fc1.bias": "model-00004-of-00004.safetensors",
446
+ "vpm.encoder.layers.13.mlp.fc1.weight": "model-00004-of-00004.safetensors",
447
+ "vpm.encoder.layers.13.mlp.fc2.bias": "model-00004-of-00004.safetensors",
448
+ "vpm.encoder.layers.13.mlp.fc2.weight": "model-00004-of-00004.safetensors",
449
+ "vpm.encoder.layers.13.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
450
+ "vpm.encoder.layers.13.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
451
+ "vpm.encoder.layers.13.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
452
+ "vpm.encoder.layers.13.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
453
+ "vpm.encoder.layers.13.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
454
+ "vpm.encoder.layers.13.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
455
+ "vpm.encoder.layers.13.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
456
+ "vpm.encoder.layers.13.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
457
+ "vpm.encoder.layers.14.layer_norm1.bias": "model-00004-of-00004.safetensors",
458
+ "vpm.encoder.layers.14.layer_norm1.weight": "model-00004-of-00004.safetensors",
459
+ "vpm.encoder.layers.14.layer_norm2.bias": "model-00004-of-00004.safetensors",
460
+ "vpm.encoder.layers.14.layer_norm2.weight": "model-00004-of-00004.safetensors",
461
+ "vpm.encoder.layers.14.mlp.fc1.bias": "model-00004-of-00004.safetensors",
462
+ "vpm.encoder.layers.14.mlp.fc1.weight": "model-00004-of-00004.safetensors",
463
+ "vpm.encoder.layers.14.mlp.fc2.bias": "model-00004-of-00004.safetensors",
464
+ "vpm.encoder.layers.14.mlp.fc2.weight": "model-00004-of-00004.safetensors",
465
+ "vpm.encoder.layers.14.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
466
+ "vpm.encoder.layers.14.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
467
+ "vpm.encoder.layers.14.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
468
+ "vpm.encoder.layers.14.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
469
+ "vpm.encoder.layers.14.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
470
+ "vpm.encoder.layers.14.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
471
+ "vpm.encoder.layers.14.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
472
+ "vpm.encoder.layers.14.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
473
+ "vpm.encoder.layers.15.layer_norm1.bias": "model-00004-of-00004.safetensors",
474
+ "vpm.encoder.layers.15.layer_norm1.weight": "model-00004-of-00004.safetensors",
475
+ "vpm.encoder.layers.15.layer_norm2.bias": "model-00004-of-00004.safetensors",
476
+ "vpm.encoder.layers.15.layer_norm2.weight": "model-00004-of-00004.safetensors",
477
+ "vpm.encoder.layers.15.mlp.fc1.bias": "model-00004-of-00004.safetensors",
478
+ "vpm.encoder.layers.15.mlp.fc1.weight": "model-00004-of-00004.safetensors",
479
+ "vpm.encoder.layers.15.mlp.fc2.bias": "model-00004-of-00004.safetensors",
480
+ "vpm.encoder.layers.15.mlp.fc2.weight": "model-00004-of-00004.safetensors",
481
+ "vpm.encoder.layers.15.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
482
+ "vpm.encoder.layers.15.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
483
+ "vpm.encoder.layers.15.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
484
+ "vpm.encoder.layers.15.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
485
+ "vpm.encoder.layers.15.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
486
+ "vpm.encoder.layers.15.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
487
+ "vpm.encoder.layers.15.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
488
+ "vpm.encoder.layers.15.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
489
+ "vpm.encoder.layers.16.layer_norm1.bias": "model-00004-of-00004.safetensors",
490
+ "vpm.encoder.layers.16.layer_norm1.weight": "model-00004-of-00004.safetensors",
491
+ "vpm.encoder.layers.16.layer_norm2.bias": "model-00004-of-00004.safetensors",
492
+ "vpm.encoder.layers.16.layer_norm2.weight": "model-00004-of-00004.safetensors",
493
+ "vpm.encoder.layers.16.mlp.fc1.bias": "model-00004-of-00004.safetensors",
494
+ "vpm.encoder.layers.16.mlp.fc1.weight": "model-00004-of-00004.safetensors",
495
+ "vpm.encoder.layers.16.mlp.fc2.bias": "model-00004-of-00004.safetensors",
496
+ "vpm.encoder.layers.16.mlp.fc2.weight": "model-00004-of-00004.safetensors",
497
+ "vpm.encoder.layers.16.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
498
+ "vpm.encoder.layers.16.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
499
+ "vpm.encoder.layers.16.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
500
+ "vpm.encoder.layers.16.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
501
+ "vpm.encoder.layers.16.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
502
+ "vpm.encoder.layers.16.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
503
+ "vpm.encoder.layers.16.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
504
+ "vpm.encoder.layers.16.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
505
+ "vpm.encoder.layers.17.layer_norm1.bias": "model-00004-of-00004.safetensors",
506
+ "vpm.encoder.layers.17.layer_norm1.weight": "model-00004-of-00004.safetensors",
507
+ "vpm.encoder.layers.17.layer_norm2.bias": "model-00004-of-00004.safetensors",
508
+ "vpm.encoder.layers.17.layer_norm2.weight": "model-00004-of-00004.safetensors",
509
+ "vpm.encoder.layers.17.mlp.fc1.bias": "model-00004-of-00004.safetensors",
510
+ "vpm.encoder.layers.17.mlp.fc1.weight": "model-00004-of-00004.safetensors",
511
+ "vpm.encoder.layers.17.mlp.fc2.bias": "model-00004-of-00004.safetensors",
512
+ "vpm.encoder.layers.17.mlp.fc2.weight": "model-00004-of-00004.safetensors",
513
+ "vpm.encoder.layers.17.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
514
+ "vpm.encoder.layers.17.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
515
+ "vpm.encoder.layers.17.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
516
+ "vpm.encoder.layers.17.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
517
+ "vpm.encoder.layers.17.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
518
+ "vpm.encoder.layers.17.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
519
+ "vpm.encoder.layers.17.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
520
+ "vpm.encoder.layers.17.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
521
+ "vpm.encoder.layers.18.layer_norm1.bias": "model-00004-of-00004.safetensors",
522
+ "vpm.encoder.layers.18.layer_norm1.weight": "model-00004-of-00004.safetensors",
523
+ "vpm.encoder.layers.18.layer_norm2.bias": "model-00004-of-00004.safetensors",
524
+ "vpm.encoder.layers.18.layer_norm2.weight": "model-00004-of-00004.safetensors",
525
+ "vpm.encoder.layers.18.mlp.fc1.bias": "model-00004-of-00004.safetensors",
526
+ "vpm.encoder.layers.18.mlp.fc1.weight": "model-00004-of-00004.safetensors",
527
+ "vpm.encoder.layers.18.mlp.fc2.bias": "model-00004-of-00004.safetensors",
528
+ "vpm.encoder.layers.18.mlp.fc2.weight": "model-00004-of-00004.safetensors",
529
+ "vpm.encoder.layers.18.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
530
+ "vpm.encoder.layers.18.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
531
+ "vpm.encoder.layers.18.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
532
+ "vpm.encoder.layers.18.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
533
+ "vpm.encoder.layers.18.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
534
+ "vpm.encoder.layers.18.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
535
+ "vpm.encoder.layers.18.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
536
+ "vpm.encoder.layers.18.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
537
+ "vpm.encoder.layers.19.layer_norm1.bias": "model-00004-of-00004.safetensors",
538
+ "vpm.encoder.layers.19.layer_norm1.weight": "model-00004-of-00004.safetensors",
539
+ "vpm.encoder.layers.19.layer_norm2.bias": "model-00004-of-00004.safetensors",
540
+ "vpm.encoder.layers.19.layer_norm2.weight": "model-00004-of-00004.safetensors",
541
+ "vpm.encoder.layers.19.mlp.fc1.bias": "model-00004-of-00004.safetensors",
542
+ "vpm.encoder.layers.19.mlp.fc1.weight": "model-00004-of-00004.safetensors",
543
+ "vpm.encoder.layers.19.mlp.fc2.bias": "model-00004-of-00004.safetensors",
544
+ "vpm.encoder.layers.19.mlp.fc2.weight": "model-00004-of-00004.safetensors",
545
+ "vpm.encoder.layers.19.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
546
+ "vpm.encoder.layers.19.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
547
+ "vpm.encoder.layers.19.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
548
+ "vpm.encoder.layers.19.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
549
+ "vpm.encoder.layers.19.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
550
+ "vpm.encoder.layers.19.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
551
+ "vpm.encoder.layers.19.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
552
+ "vpm.encoder.layers.19.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
553
+ "vpm.encoder.layers.2.layer_norm1.bias": "model-00004-of-00004.safetensors",
554
+ "vpm.encoder.layers.2.layer_norm1.weight": "model-00004-of-00004.safetensors",
555
+ "vpm.encoder.layers.2.layer_norm2.bias": "model-00004-of-00004.safetensors",
556
+ "vpm.encoder.layers.2.layer_norm2.weight": "model-00004-of-00004.safetensors",
557
+ "vpm.encoder.layers.2.mlp.fc1.bias": "model-00004-of-00004.safetensors",
558
+ "vpm.encoder.layers.2.mlp.fc1.weight": "model-00004-of-00004.safetensors",
559
+ "vpm.encoder.layers.2.mlp.fc2.bias": "model-00004-of-00004.safetensors",
560
+ "vpm.encoder.layers.2.mlp.fc2.weight": "model-00004-of-00004.safetensors",
561
+ "vpm.encoder.layers.2.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
562
+ "vpm.encoder.layers.2.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
563
+ "vpm.encoder.layers.2.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
564
+ "vpm.encoder.layers.2.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
565
+ "vpm.encoder.layers.2.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
566
+ "vpm.encoder.layers.2.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
567
+ "vpm.encoder.layers.2.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
568
+ "vpm.encoder.layers.2.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
569
+ "vpm.encoder.layers.20.layer_norm1.bias": "model-00004-of-00004.safetensors",
570
+ "vpm.encoder.layers.20.layer_norm1.weight": "model-00004-of-00004.safetensors",
571
+ "vpm.encoder.layers.20.layer_norm2.bias": "model-00004-of-00004.safetensors",
572
+ "vpm.encoder.layers.20.layer_norm2.weight": "model-00004-of-00004.safetensors",
573
+ "vpm.encoder.layers.20.mlp.fc1.bias": "model-00004-of-00004.safetensors",
574
+ "vpm.encoder.layers.20.mlp.fc1.weight": "model-00004-of-00004.safetensors",
575
+ "vpm.encoder.layers.20.mlp.fc2.bias": "model-00004-of-00004.safetensors",
576
+ "vpm.encoder.layers.20.mlp.fc2.weight": "model-00004-of-00004.safetensors",
577
+ "vpm.encoder.layers.20.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
578
+ "vpm.encoder.layers.20.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
579
+ "vpm.encoder.layers.20.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
580
+ "vpm.encoder.layers.20.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
581
+ "vpm.encoder.layers.20.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
582
+ "vpm.encoder.layers.20.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
583
+ "vpm.encoder.layers.20.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
584
+ "vpm.encoder.layers.20.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
585
+ "vpm.encoder.layers.21.layer_norm1.bias": "model-00004-of-00004.safetensors",
586
+ "vpm.encoder.layers.21.layer_norm1.weight": "model-00004-of-00004.safetensors",
587
+ "vpm.encoder.layers.21.layer_norm2.bias": "model-00004-of-00004.safetensors",
588
+ "vpm.encoder.layers.21.layer_norm2.weight": "model-00004-of-00004.safetensors",
589
+ "vpm.encoder.layers.21.mlp.fc1.bias": "model-00004-of-00004.safetensors",
590
+ "vpm.encoder.layers.21.mlp.fc1.weight": "model-00004-of-00004.safetensors",
591
+ "vpm.encoder.layers.21.mlp.fc2.bias": "model-00004-of-00004.safetensors",
592
+ "vpm.encoder.layers.21.mlp.fc2.weight": "model-00004-of-00004.safetensors",
593
+ "vpm.encoder.layers.21.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
594
+ "vpm.encoder.layers.21.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
595
+ "vpm.encoder.layers.21.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
596
+ "vpm.encoder.layers.21.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
597
+ "vpm.encoder.layers.21.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
598
+ "vpm.encoder.layers.21.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
599
+ "vpm.encoder.layers.21.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
600
+ "vpm.encoder.layers.21.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
601
+ "vpm.encoder.layers.22.layer_norm1.bias": "model-00004-of-00004.safetensors",
602
+ "vpm.encoder.layers.22.layer_norm1.weight": "model-00004-of-00004.safetensors",
603
+ "vpm.encoder.layers.22.layer_norm2.bias": "model-00004-of-00004.safetensors",
604
+ "vpm.encoder.layers.22.layer_norm2.weight": "model-00004-of-00004.safetensors",
605
+ "vpm.encoder.layers.22.mlp.fc1.bias": "model-00004-of-00004.safetensors",
606
+ "vpm.encoder.layers.22.mlp.fc1.weight": "model-00004-of-00004.safetensors",
607
+ "vpm.encoder.layers.22.mlp.fc2.bias": "model-00004-of-00004.safetensors",
608
+ "vpm.encoder.layers.22.mlp.fc2.weight": "model-00004-of-00004.safetensors",
609
+ "vpm.encoder.layers.22.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
610
+ "vpm.encoder.layers.22.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
611
+ "vpm.encoder.layers.22.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
612
+ "vpm.encoder.layers.22.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
613
+ "vpm.encoder.layers.22.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
614
+ "vpm.encoder.layers.22.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
615
+ "vpm.encoder.layers.22.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
616
+ "vpm.encoder.layers.22.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
617
+ "vpm.encoder.layers.23.layer_norm1.bias": "model-00004-of-00004.safetensors",
618
+ "vpm.encoder.layers.23.layer_norm1.weight": "model-00004-of-00004.safetensors",
619
+ "vpm.encoder.layers.23.layer_norm2.bias": "model-00004-of-00004.safetensors",
620
+ "vpm.encoder.layers.23.layer_norm2.weight": "model-00004-of-00004.safetensors",
621
+ "vpm.encoder.layers.23.mlp.fc1.bias": "model-00004-of-00004.safetensors",
622
+ "vpm.encoder.layers.23.mlp.fc1.weight": "model-00004-of-00004.safetensors",
623
+ "vpm.encoder.layers.23.mlp.fc2.bias": "model-00004-of-00004.safetensors",
624
+ "vpm.encoder.layers.23.mlp.fc2.weight": "model-00004-of-00004.safetensors",
625
+ "vpm.encoder.layers.23.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
626
+ "vpm.encoder.layers.23.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
627
+ "vpm.encoder.layers.23.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
628
+ "vpm.encoder.layers.23.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
629
+ "vpm.encoder.layers.23.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
630
+ "vpm.encoder.layers.23.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
631
+ "vpm.encoder.layers.23.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
632
+ "vpm.encoder.layers.23.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
633
+ "vpm.encoder.layers.24.layer_norm1.bias": "model-00004-of-00004.safetensors",
634
+ "vpm.encoder.layers.24.layer_norm1.weight": "model-00004-of-00004.safetensors",
635
+ "vpm.encoder.layers.24.layer_norm2.bias": "model-00004-of-00004.safetensors",
636
+ "vpm.encoder.layers.24.layer_norm2.weight": "model-00004-of-00004.safetensors",
637
+ "vpm.encoder.layers.24.mlp.fc1.bias": "model-00004-of-00004.safetensors",
638
+ "vpm.encoder.layers.24.mlp.fc1.weight": "model-00004-of-00004.safetensors",
639
+ "vpm.encoder.layers.24.mlp.fc2.bias": "model-00004-of-00004.safetensors",
640
+ "vpm.encoder.layers.24.mlp.fc2.weight": "model-00004-of-00004.safetensors",
641
+ "vpm.encoder.layers.24.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
642
+ "vpm.encoder.layers.24.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
643
+ "vpm.encoder.layers.24.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
644
+ "vpm.encoder.layers.24.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
645
+ "vpm.encoder.layers.24.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
646
+ "vpm.encoder.layers.24.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
647
+ "vpm.encoder.layers.24.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
648
+ "vpm.encoder.layers.24.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
649
+ "vpm.encoder.layers.25.layer_norm1.bias": "model-00004-of-00004.safetensors",
650
+ "vpm.encoder.layers.25.layer_norm1.weight": "model-00004-of-00004.safetensors",
651
+ "vpm.encoder.layers.25.layer_norm2.bias": "model-00004-of-00004.safetensors",
652
+ "vpm.encoder.layers.25.layer_norm2.weight": "model-00004-of-00004.safetensors",
653
+ "vpm.encoder.layers.25.mlp.fc1.bias": "model-00004-of-00004.safetensors",
654
+ "vpm.encoder.layers.25.mlp.fc1.weight": "model-00004-of-00004.safetensors",
655
+ "vpm.encoder.layers.25.mlp.fc2.bias": "model-00004-of-00004.safetensors",
656
+ "vpm.encoder.layers.25.mlp.fc2.weight": "model-00004-of-00004.safetensors",
657
+ "vpm.encoder.layers.25.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
658
+ "vpm.encoder.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
659
+ "vpm.encoder.layers.25.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
660
+ "vpm.encoder.layers.25.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
661
+ "vpm.encoder.layers.25.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
662
+ "vpm.encoder.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
663
+ "vpm.encoder.layers.25.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
664
+ "vpm.encoder.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
665
+ "vpm.encoder.layers.26.layer_norm1.bias": "model-00004-of-00004.safetensors",
666
+ "vpm.encoder.layers.26.layer_norm1.weight": "model-00004-of-00004.safetensors",
667
+ "vpm.encoder.layers.26.layer_norm2.bias": "model-00004-of-00004.safetensors",
668
+ "vpm.encoder.layers.26.layer_norm2.weight": "model-00004-of-00004.safetensors",
669
+ "vpm.encoder.layers.26.mlp.fc1.bias": "model-00004-of-00004.safetensors",
670
+ "vpm.encoder.layers.26.mlp.fc1.weight": "model-00004-of-00004.safetensors",
671
+ "vpm.encoder.layers.26.mlp.fc2.bias": "model-00004-of-00004.safetensors",
672
+ "vpm.encoder.layers.26.mlp.fc2.weight": "model-00004-of-00004.safetensors",
673
+ "vpm.encoder.layers.26.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
674
+ "vpm.encoder.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
675
+ "vpm.encoder.layers.26.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
676
+ "vpm.encoder.layers.26.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
677
+ "vpm.encoder.layers.26.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
678
+ "vpm.encoder.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
679
+ "vpm.encoder.layers.26.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
680
+ "vpm.encoder.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
681
+ "vpm.encoder.layers.3.layer_norm1.bias": "model-00004-of-00004.safetensors",
682
+ "vpm.encoder.layers.3.layer_norm1.weight": "model-00004-of-00004.safetensors",
683
+ "vpm.encoder.layers.3.layer_norm2.bias": "model-00004-of-00004.safetensors",
684
+ "vpm.encoder.layers.3.layer_norm2.weight": "model-00004-of-00004.safetensors",
685
+ "vpm.encoder.layers.3.mlp.fc1.bias": "model-00004-of-00004.safetensors",
686
+ "vpm.encoder.layers.3.mlp.fc1.weight": "model-00004-of-00004.safetensors",
687
+ "vpm.encoder.layers.3.mlp.fc2.bias": "model-00004-of-00004.safetensors",
688
+ "vpm.encoder.layers.3.mlp.fc2.weight": "model-00004-of-00004.safetensors",
689
+ "vpm.encoder.layers.3.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
690
+ "vpm.encoder.layers.3.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
691
+ "vpm.encoder.layers.3.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
692
+ "vpm.encoder.layers.3.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
693
+ "vpm.encoder.layers.3.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
694
+ "vpm.encoder.layers.3.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
695
+ "vpm.encoder.layers.3.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
696
+ "vpm.encoder.layers.3.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
697
+ "vpm.encoder.layers.4.layer_norm1.bias": "model-00004-of-00004.safetensors",
698
+ "vpm.encoder.layers.4.layer_norm1.weight": "model-00004-of-00004.safetensors",
699
+ "vpm.encoder.layers.4.layer_norm2.bias": "model-00004-of-00004.safetensors",
700
+ "vpm.encoder.layers.4.layer_norm2.weight": "model-00004-of-00004.safetensors",
701
+ "vpm.encoder.layers.4.mlp.fc1.bias": "model-00004-of-00004.safetensors",
702
+ "vpm.encoder.layers.4.mlp.fc1.weight": "model-00004-of-00004.safetensors",
703
+ "vpm.encoder.layers.4.mlp.fc2.bias": "model-00004-of-00004.safetensors",
704
+ "vpm.encoder.layers.4.mlp.fc2.weight": "model-00004-of-00004.safetensors",
705
+ "vpm.encoder.layers.4.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
706
+ "vpm.encoder.layers.4.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
707
+ "vpm.encoder.layers.4.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
708
+ "vpm.encoder.layers.4.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
709
+ "vpm.encoder.layers.4.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
710
+ "vpm.encoder.layers.4.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
711
+ "vpm.encoder.layers.4.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
712
+ "vpm.encoder.layers.4.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
713
+ "vpm.encoder.layers.5.layer_norm1.bias": "model-00004-of-00004.safetensors",
714
+ "vpm.encoder.layers.5.layer_norm1.weight": "model-00004-of-00004.safetensors",
715
+ "vpm.encoder.layers.5.layer_norm2.bias": "model-00004-of-00004.safetensors",
716
+ "vpm.encoder.layers.5.layer_norm2.weight": "model-00004-of-00004.safetensors",
717
+ "vpm.encoder.layers.5.mlp.fc1.bias": "model-00004-of-00004.safetensors",
718
+ "vpm.encoder.layers.5.mlp.fc1.weight": "model-00004-of-00004.safetensors",
719
+ "vpm.encoder.layers.5.mlp.fc2.bias": "model-00004-of-00004.safetensors",
720
+ "vpm.encoder.layers.5.mlp.fc2.weight": "model-00004-of-00004.safetensors",
721
+ "vpm.encoder.layers.5.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
722
+ "vpm.encoder.layers.5.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
723
+ "vpm.encoder.layers.5.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
724
+ "vpm.encoder.layers.5.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
725
+ "vpm.encoder.layers.5.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
726
+ "vpm.encoder.layers.5.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
727
+ "vpm.encoder.layers.5.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
728
+ "vpm.encoder.layers.5.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
729
+ "vpm.encoder.layers.6.layer_norm1.bias": "model-00004-of-00004.safetensors",
730
+ "vpm.encoder.layers.6.layer_norm1.weight": "model-00004-of-00004.safetensors",
731
+ "vpm.encoder.layers.6.layer_norm2.bias": "model-00004-of-00004.safetensors",
732
+ "vpm.encoder.layers.6.layer_norm2.weight": "model-00004-of-00004.safetensors",
733
+ "vpm.encoder.layers.6.mlp.fc1.bias": "model-00004-of-00004.safetensors",
734
+ "vpm.encoder.layers.6.mlp.fc1.weight": "model-00004-of-00004.safetensors",
735
+ "vpm.encoder.layers.6.mlp.fc2.bias": "model-00004-of-00004.safetensors",
736
+ "vpm.encoder.layers.6.mlp.fc2.weight": "model-00004-of-00004.safetensors",
737
+ "vpm.encoder.layers.6.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
738
+ "vpm.encoder.layers.6.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
739
+ "vpm.encoder.layers.6.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
740
+ "vpm.encoder.layers.6.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
741
+ "vpm.encoder.layers.6.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
742
+ "vpm.encoder.layers.6.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
743
+ "vpm.encoder.layers.6.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
744
+ "vpm.encoder.layers.6.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
745
+ "vpm.encoder.layers.7.layer_norm1.bias": "model-00004-of-00004.safetensors",
746
+ "vpm.encoder.layers.7.layer_norm1.weight": "model-00004-of-00004.safetensors",
747
+ "vpm.encoder.layers.7.layer_norm2.bias": "model-00004-of-00004.safetensors",
748
+ "vpm.encoder.layers.7.layer_norm2.weight": "model-00004-of-00004.safetensors",
749
+ "vpm.encoder.layers.7.mlp.fc1.bias": "model-00004-of-00004.safetensors",
750
+ "vpm.encoder.layers.7.mlp.fc1.weight": "model-00004-of-00004.safetensors",
751
+ "vpm.encoder.layers.7.mlp.fc2.bias": "model-00004-of-00004.safetensors",
752
+ "vpm.encoder.layers.7.mlp.fc2.weight": "model-00004-of-00004.safetensors",
753
+ "vpm.encoder.layers.7.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
754
+ "vpm.encoder.layers.7.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
755
+ "vpm.encoder.layers.7.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
756
+ "vpm.encoder.layers.7.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
757
+ "vpm.encoder.layers.7.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
758
+ "vpm.encoder.layers.7.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
759
+ "vpm.encoder.layers.7.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
760
+ "vpm.encoder.layers.7.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
761
+ "vpm.encoder.layers.8.layer_norm1.bias": "model-00004-of-00004.safetensors",
762
+ "vpm.encoder.layers.8.layer_norm1.weight": "model-00004-of-00004.safetensors",
763
+ "vpm.encoder.layers.8.layer_norm2.bias": "model-00004-of-00004.safetensors",
764
+ "vpm.encoder.layers.8.layer_norm2.weight": "model-00004-of-00004.safetensors",
765
+ "vpm.encoder.layers.8.mlp.fc1.bias": "model-00004-of-00004.safetensors",
766
+ "vpm.encoder.layers.8.mlp.fc1.weight": "model-00004-of-00004.safetensors",
767
+ "vpm.encoder.layers.8.mlp.fc2.bias": "model-00004-of-00004.safetensors",
768
+ "vpm.encoder.layers.8.mlp.fc2.weight": "model-00004-of-00004.safetensors",
769
+ "vpm.encoder.layers.8.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
770
+ "vpm.encoder.layers.8.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
771
+ "vpm.encoder.layers.8.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
772
+ "vpm.encoder.layers.8.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
773
+ "vpm.encoder.layers.8.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
774
+ "vpm.encoder.layers.8.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
775
+ "vpm.encoder.layers.8.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
776
+ "vpm.encoder.layers.8.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
777
+ "vpm.encoder.layers.9.layer_norm1.bias": "model-00004-of-00004.safetensors",
778
+ "vpm.encoder.layers.9.layer_norm1.weight": "model-00004-of-00004.safetensors",
779
+ "vpm.encoder.layers.9.layer_norm2.bias": "model-00004-of-00004.safetensors",
780
+ "vpm.encoder.layers.9.layer_norm2.weight": "model-00004-of-00004.safetensors",
781
+ "vpm.encoder.layers.9.mlp.fc1.bias": "model-00004-of-00004.safetensors",
782
+ "vpm.encoder.layers.9.mlp.fc1.weight": "model-00004-of-00004.safetensors",
783
+ "vpm.encoder.layers.9.mlp.fc2.bias": "model-00004-of-00004.safetensors",
784
+ "vpm.encoder.layers.9.mlp.fc2.weight": "model-00004-of-00004.safetensors",
785
+ "vpm.encoder.layers.9.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
786
+ "vpm.encoder.layers.9.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
787
+ "vpm.encoder.layers.9.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
788
+ "vpm.encoder.layers.9.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
789
+ "vpm.encoder.layers.9.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
790
+ "vpm.encoder.layers.9.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
791
+ "vpm.encoder.layers.9.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
792
+ "vpm.encoder.layers.9.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
793
+ "vpm.post_layernorm.bias": "model-00004-of-00004.safetensors",
794
+ "vpm.post_layernorm.weight": "model-00004-of-00004.safetensors"
795
+ }
796
+ }
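The weight_map above assigns every `vpm.*` (SigLIP vision tower) tensor, up to and including `vpm.post_layernorm`, to the fourth shard. A minimal sketch of how such an index can be used to locate and load a single tensor without reading the other shards; the checkpoint directory below is a placeholder, and only `json` plus `safetensors.torch.load_file` are assumed:

    import json
    from safetensors.torch import load_file

    ckpt_dir = "checkpoint-4800"  # placeholder: any directory holding the files in this commit

    with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
        index = json.load(f)

    name = "vpm.post_layernorm.weight"
    shard = index["weight_map"][name]            # e.g. "model-00004-of-00004.safetensors"
    weights = load_file(f"{ckpt_dir}/{shard}")   # loads only that shard from disk
    print(name, tuple(weights[name].shape))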
checkpoint-4800/modeling_minicpmv.py ADDED
@@ -0,0 +1,403 @@
1
+ import math
2
+ from typing import List, Optional
3
+ import json
4
+ import torch
5
+ import torchvision
6
+
7
+ from threading import Thread
8
+ from copy import deepcopy
9
+ from PIL import Image
10
+ from transformers import AutoProcessor, Qwen2PreTrainedModel, Qwen2ForCausalLM, TextIteratorStreamer
11
+
12
+ from .configuration_minicpm import MiniCPMVConfig
13
+ from .modeling_navit_siglip import SiglipVisionTransformer
14
+ from .resampler import Resampler
15
+
16
+
17
+
18
+ class MiniCPMVPreTrainedModel(Qwen2PreTrainedModel):
19
+ config_class = MiniCPMVConfig
20
+
21
+
22
+ class MiniCPMV(MiniCPMVPreTrainedModel):
23
+ def __init__(self, config):
24
+ super().__init__(config)
25
+ self.llm = Qwen2ForCausalLM(config)
26
+ self.vpm = self.init_vision_module()
27
+ self.vision_dim = self.vpm.embed_dim
28
+ self.embed_dim = self.llm.config.hidden_size
29
+ self.resampler = self.init_resampler(self.embed_dim, self.vision_dim)
30
+ self.processor = None
31
+
32
+ self.terminators = ['<|im_end|>', '<|endoftext|>']
33
+
34
+ def init_vision_module(self):
35
+ # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added
36
+ if self.config._attn_implementation == 'flash_attention_2':
37
+ self.config.vision_config._attn_implementation = 'flash_attention_2'
38
+ else:
39
+ # SDPA is not supported here; fall back to eager attention
40
+ self.config.vision_config._attn_implementation = 'eager'
41
+ model = SiglipVisionTransformer(self.config.vision_config)
42
+ if self.config.drop_vision_last_layer:
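+ # drop the last SigLIP encoder block so the penultimate layer's features are used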
43
+ model.encoder.layers = model.encoder.layers[:-1]
44
+
45
+ setattr(model, 'embed_dim', model.embeddings.embed_dim)
46
+ setattr(model, 'patch_size', model.embeddings.patch_size)
47
+
48
+ return model
49
+
50
+ def init_resampler(self, embed_dim, vision_dim):
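+ # one resampler query per output vision token (config.query_num); attention head dim fixed at 128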
51
+ return Resampler(
52
+ num_queries=self.config.query_num,
53
+ embed_dim=embed_dim,
54
+ num_heads=embed_dim // 128,
55
+ kv_dim=vision_dim,
56
+ adaptive=True
57
+ )
58
+
59
+ def get_input_embeddings(self):
60
+ return self.llm.get_input_embeddings()
61
+
62
+ def set_input_embeddings(self, value):
63
+ self.llm.embed_tokens = value
64
+
65
+ def get_output_embeddings(self):
66
+ return self.llm.lm_head
67
+
68
+ def set_output_embeddings(self, new_embeddings):
69
+ self.llm.lm_head = new_embeddings
70
+
71
+ def set_decoder(self, decoder):
72
+ self.llm = decoder
73
+
74
+ def get_decoder(self):
75
+ return self.llm
76
+
77
+ def get_vllm_embedding(self, data):
78
+ if 'vision_hidden_states' not in data:
79
+ dtype = self.llm.model.embed_tokens.weight.dtype
80
+ device = self.llm.model.embed_tokens.weight.device
81
+ tgt_sizes = data['tgt_sizes']
82
+ pixel_values_list = data['pixel_values']
83
+ vision_hidden_states = []
84
+ all_pixel_values = []
85
+ img_cnt = []
86
+ for pixel_values in pixel_values_list:
87
+ img_cnt.append(len(pixel_values))
88
+ all_pixel_values.extend([i.flatten(end_dim=1).permute(1, 0) for i in pixel_values])
89
+
90
+ # at least one image exists in the batch
91
+ if all_pixel_values:
92
+ tgt_sizes = [tgt_size for tgt_size in tgt_sizes if isinstance(tgt_size, torch.Tensor)]
93
+ tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)
94
+
95
+ max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])
96
+
97
+ all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
98
+ padding_value=0.0)
99
+ B, L, _ = all_pixel_values.shape
100
+ all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
101
+
102
+ patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
103
+ for i in range(B):
104
+ patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True
105
+
106
+ vision_batch_size = self.config.vision_batch_size
107
+ all_pixel_values = all_pixel_values.type(dtype)
108
+ if B > vision_batch_size:
109
+ hs = []
110
+ for i in range(0, B, vision_batch_size):
111
+ start_idx = i
112
+ end_idx = i + vision_batch_size
113
+ tmp_hs = self.vpm(all_pixel_values[start_idx:end_idx], patch_attention_mask=patch_attn_mask[start_idx:end_idx], tgt_sizes=tgt_sizes[start_idx:end_idx]).last_hidden_state
114
+ hs.append(tmp_hs)
115
+ vision_embedding = torch.cat(hs, dim=0)
116
+ else:
117
+ vision_embedding = self.vpm(all_pixel_values, patch_attention_mask=patch_attn_mask, tgt_sizes=tgt_sizes).last_hidden_state
118
+ vision_embedding = self.resampler(vision_embedding, tgt_sizes)
119
+
120
+ start = 0
121
+ for pixel_values in pixel_values_list:
122
+ img_cnt = len(pixel_values)
123
+ if img_cnt > 0:
124
+ vision_hidden_states.append(vision_embedding[start: start + img_cnt])
125
+ start += img_cnt
126
+ else:
127
+ vision_hidden_states.append([])
128
+ else: # no image
129
+ if self.training:
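+ # run a dummy 224x224 image through the vision tower so the vision encoder and
+ # resampler still take part in the backward pass when a training batch has no images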
130
+ dummy_image = torch.zeros(
131
+ (1, 3, 224, 224),
132
+ device=device, dtype=dtype
133
+ )
134
+ tgt_sizes = torch.Tensor([[(224 // self.config.patch_size), math.ceil(224 / self.config.patch_size)]]).type(torch.int32)
135
+ dummy_feature = self.resampler(self.vpm(dummy_image).last_hidden_state, tgt_sizes)
136
+ else:
137
+ dummy_feature = []
138
+ for _ in range(len(pixel_values_list)):
139
+ vision_hidden_states.append(dummy_feature)
140
+
141
+ else:
142
+ vision_hidden_states = data['vision_hidden_states']
143
+
144
+ if hasattr(self.llm.config, 'scale_emb'):
145
+ vllm_embedding = self.llm.model.embed_tokens(data['input_ids']) * self.llm.config.scale_emb
146
+ else:
147
+ vllm_embedding = self.llm.model.embed_tokens(data['input_ids'])
148
+
149
+ vision_hidden_states = [i.type(vllm_embedding.dtype) if isinstance(
150
+ i, torch.Tensor) else i for i in vision_hidden_states]
151
+
152
+ bs = len(data['input_ids'])
153
+ for i in range(bs):
154
+ cur_vs_hs = vision_hidden_states[i]
155
+ if len(cur_vs_hs) > 0:
156
+ cur_vllm_emb = vllm_embedding[i]
157
+ cur_image_bound = data['image_bound'][i]
158
+ if len(cur_image_bound) > 0:
159
+ image_indices = torch.stack(
160
+ [torch.arange(r[0], r[1], dtype=torch.long) for r in cur_image_bound]
161
+ ).to(vllm_embedding.device)
162
+
163
+ cur_vllm_emb.scatter_(0, image_indices.view(-1, 1).repeat(1, cur_vllm_emb.shape[-1]),
164
+ cur_vs_hs.view(-1, cur_vs_hs.shape[-1]))
165
+ elif self.training:
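+ # no image positions to fill for this sample: add a zero-scaled term so gradients
+ # still flow back to the vision tower instead of leaving its parameters unused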
166
+ cur_vllm_emb += cur_vs_hs[0].mean() * 0
167
+
168
+ return vllm_embedding, vision_hidden_states
169
+
170
+ def forward(self, data, **kwargs):
171
+ vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
172
+ position_ids = data["position_ids"]
173
+ if position_ids.dtype != torch.int64:
174
+ position_ids = position_ids.long()
175
+
176
+ return self.llm(
177
+ input_ids=None,
178
+ position_ids=position_ids,
179
+ inputs_embeds=vllm_embedding,
180
+ **kwargs
181
+ )
182
+
183
+ def _decode(self, inputs_embeds, tokenizer, attention_mask, decode_text=False, **kwargs):
184
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
185
+ output = self.llm.generate(
186
+ inputs_embeds=inputs_embeds,
187
+ pad_token_id=0,
188
+ eos_token_id=terminators,
189
+ attention_mask=attention_mask,
190
+ **kwargs
191
+ )
192
+ if decode_text:
193
+ return self._decode_text(output, tokenizer)
194
+ return output
195
+
196
+ def _decode_stream(self, inputs_embeds, tokenizer, **kwargs):
197
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
198
+ streamer = TextIteratorStreamer(tokenizer=tokenizer)
199
+ generation_kwargs = {
200
+ 'inputs_embeds': inputs_embeds,
201
+ 'pad_token_id': 0,
202
+ 'eos_token_id': terminators,
203
+ 'streamer': streamer
204
+ }
205
+ generation_kwargs.update(kwargs)
206
+
207
+ thread = Thread(target=self.llm.generate, kwargs=generation_kwargs)
208
+ thread.start()
209
+
210
+ return streamer
211
+
212
+ def _decode_text(self, result_ids, tokenizer):
213
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
214
+ result_text = []
215
+ for result in result_ids:
216
+ result = result[result != 0]
217
+ if result[0] == tokenizer.bos_id:
218
+ result = result[1:]
219
+ if result[-1] in terminators:
220
+ result = result[:-1]
221
+ result_text.append(tokenizer.decode(result).strip())
222
+ return result_text
223
+
224
+ def generate(
225
+ self,
226
+ input_ids=None,
227
+ pixel_values=None,
228
+ tgt_sizes=None,
229
+ image_bound=None,
230
+ attention_mask=None,
231
+ tokenizer=None,
232
+ vision_hidden_states=None,
233
+ return_vision_hidden_states=False,
234
+ stream=False,
235
+ decode_text=False,
236
+ **kwargs
237
+ ):
238
+ assert input_ids is not None
239
+ assert len(input_ids) == len(pixel_values)
240
+
241
+ model_inputs = {
242
+ "input_ids": input_ids,
243
+ "image_bound": image_bound,
244
+ }
245
+
246
+ if vision_hidden_states is None:
247
+ model_inputs["pixel_values"] = pixel_values
248
+ model_inputs['tgt_sizes'] = tgt_sizes
249
+ else:
250
+ model_inputs["vision_hidden_states"] = vision_hidden_states
251
+
252
+ with torch.inference_mode():
253
+ (
254
+ model_inputs["inputs_embeds"],
255
+ vision_hidden_states,
256
+ ) = self.get_vllm_embedding(model_inputs)
257
+
258
+ if stream:
259
+ result = self._decode_stream(model_inputs["inputs_embeds"], tokenizer, **kwargs)
260
+ else:
261
+ result = self._decode(model_inputs["inputs_embeds"], tokenizer, attention_mask, decode_text=decode_text, **kwargs)
262
+
263
+ if return_vision_hidden_states:
264
+ return result, vision_hidden_states
265
+
266
+ return result
267
+
268
+ def chat(
269
+ self,
270
+ image,
271
+ msgs,
272
+ tokenizer,
273
+ processor=None,
274
+ vision_hidden_states=None,
275
+ max_new_tokens=2048,
276
+ min_new_tokens=0,
277
+ sampling=True,
278
+ max_inp_length=8192,
279
+ system_prompt='',
280
+ stream=False,
281
+ max_slice_nums=None,
282
+ use_image_id=None,
283
+ **kwargs
284
+ ):
285
+ if isinstance(msgs[0], list):
286
+ batched = True
287
+ else:
288
+ batched = False
289
+ msgs_list = msgs
290
+ images_list = image
291
+
292
+ if batched is False:
293
+ images_list, msgs_list = [images_list], [msgs_list]
294
+ else:
295
+ assert images_list is None, "Please pass images inside msgs when using batch inference."
296
+ images_list = [None] * len(msgs_list)
297
+ assert len(images_list) == len(msgs_list), "The batch dim of images_list and msgs_list should be the same."
298
+
299
+ if processor is None:
300
+ if self.processor is None:
301
+ self.processor = AutoProcessor.from_pretrained(self.config._name_or_path, trust_remote_code=True)
302
+ processor = self.processor
303
+
304
+ assert self.config.query_num == processor.image_processor.image_feature_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
305
+ assert self.config.patch_size == processor.image_processor.patch_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
306
+ assert self.config.use_image_id == processor.image_processor.use_image_id, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
307
+ assert self.config.slice_config.max_slice_nums == processor.image_processor.max_slice_nums, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
308
+ assert self.config.slice_mode == processor.image_processor.slice_mode, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
309
+
310
+ prompts_lists = []
311
+ input_images_lists = []
312
+ for image, msgs in zip(images_list, msgs_list):
313
+ if isinstance(msgs, str):
314
+ msgs = json.loads(msgs)
315
+ copy_msgs = deepcopy(msgs)
316
+
317
+ assert len(msgs) > 0, "msgs is empty"
318
+ assert sampling or not stream, "if using stream mode, make sure sampling=True"
319
+
320
+ if image is not None and isinstance(copy_msgs[0]["content"], str):
321
+ copy_msgs[0]["content"] = [image, copy_msgs[0]["content"]]
322
+
323
+ images = []
324
+ for i, msg in enumerate(copy_msgs):
325
+ role = msg["role"]
326
+ content = msg["content"]
327
+ assert role in ["user", "assistant"]
328
+ if i == 0:
329
+ assert role == "user", "The role of first msg should be user"
330
+ if isinstance(content, str):
331
+ content = [content]
332
+ cur_msgs = []
333
+ for c in content:
334
+ if isinstance(c, Image.Image):
335
+ images.append(c)
336
+ cur_msgs.append("(<image>./</image>)")
337
+ elif isinstance(c, str):
338
+ cur_msgs.append(c)
339
+ msg["content"] = "\n".join(cur_msgs)
340
+
341
+ if system_prompt:
342
+ sys_msg = {'role': 'system', 'content': system_prompt}
343
+ copy_msgs = [sys_msg] + copy_msgs
344
+
345
+ prompts_lists.append(processor.tokenizer.apply_chat_template(copy_msgs, tokenize=False, add_generation_prompt=True))
346
+ input_images_lists.append(images)
347
+
348
+ inputs = processor(
349
+ prompts_lists,
350
+ input_images_lists,
351
+ max_slice_nums=max_slice_nums,
352
+ use_image_id=use_image_id,
353
+ return_tensors="pt",
354
+ max_length=max_inp_length
355
+ ).to(self.device)
356
+
357
+ if sampling:
358
+ generation_config = {
359
+ "top_p": 0.8,
360
+ "top_k": 100,
361
+ "temperature": 0.7,
362
+ "do_sample": True,
363
+ "repetition_penalty": 1.05
364
+ }
365
+ else:
366
+ generation_config = {
367
+ "num_beams": 3,
368
+ "repetition_penalty": 1.2,
369
+ }
370
+
371
+ if min_new_tokens > 0:
372
+ generation_config['min_new_tokens'] = min_new_tokens
373
+
374
+ generation_config.update(
375
+ (k, kwargs[k]) for k in generation_config.keys() & kwargs.keys()
376
+ )
377
+
378
+ inputs.pop("image_sizes")
379
+ with torch.inference_mode():
380
+ res = self.generate(
381
+ **inputs,
382
+ tokenizer=tokenizer,
383
+ max_new_tokens=max_new_tokens,
384
+ vision_hidden_states=vision_hidden_states,
385
+ stream=stream,
386
+ decode_text=True,
387
+ **generation_config
388
+ )
389
+
390
+ if stream:
391
+ def stream_gen():
392
+ for text in res:
393
+ for term in self.terminators:
394
+ text = text.replace(term, '')
395
+ yield text
396
+ return stream_gen()
397
+
398
+ else:
399
+ if batched:
400
+ answer = res
401
+ else:
402
+ answer = res[0]
403
+ return answer
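The file ends with the `chat` convenience wrapper defined above. A minimal usage sketch, assuming the checkpoint directory can be loaded with `trust_remote_code=True` so that `AutoModel` resolves to the `MiniCPMV` class in this file; the path and image file are placeholders:

    import torch
    from PIL import Image
    from transformers import AutoModel, AutoTokenizer

    path = "checkpoint-4800"  # placeholder: a directory containing these files
    model = AutoModel.from_pretrained(path, trust_remote_code=True,
                                      torch_dtype=torch.bfloat16).eval()
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

    image = Image.open("example.jpg").convert("RGB")
    msgs = [{"role": "user", "content": "Describe this image."}]

    # chat() inserts the "(<image>./</image>)" placeholder, runs the processor,
    # then calls generate() through the wrapper defined above
    answer = model.chat(image=image, msgs=msgs, tokenizer=tokenizer, sampling=False)
    print(answer)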
checkpoint-4800/modeling_navit_siglip.py ADDED
@@ -0,0 +1,937 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Siglip model. """
16
+ # Copied from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added
17
+
18
+
19
+ import os
20
+ import math
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Any, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn.init import _calculate_fan_in_and_fan_out
31
+
32
+ from transformers.activations import ACT2FN
33
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
34
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.configuration_utils import PretrainedConfig
37
+ from transformers.utils import (
38
+ ModelOutput,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ is_flash_attn_2_available,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from transformers.utils import logging
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ class SiglipVisionConfig(PretrainedConfig):
50
+ r"""
51
+ This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
52
+ Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
53
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
54
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
55
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
56
+ documentation from [`PretrainedConfig`] for more information.
57
+ Args:
58
+ hidden_size (`int`, *optional*, defaults to 768):
59
+ Dimensionality of the encoder layers and the pooler layer.
60
+ intermediate_size (`int`, *optional*, defaults to 3072):
61
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
62
+ num_hidden_layers (`int`, *optional*, defaults to 12):
63
+ Number of hidden layers in the Transformer encoder.
64
+ num_attention_heads (`int`, *optional*, defaults to 12):
65
+ Number of attention heads for each attention layer in the Transformer encoder.
66
+ num_channels (`int`, *optional*, defaults to 3):
67
+ Number of channels in the input images.
68
+ image_size (`int`, *optional*, defaults to 224):
69
+ The size (resolution) of each image.
70
+ patch_size (`int`, *optional*, defaults to 16):
71
+ The size (resolution) of each patch.
72
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
73
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
74
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
75
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
76
+ The epsilon used by the layer normalization layers.
77
+ attention_dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout ratio for the attention probabilities.
79
+ Example:
80
+ ```python
81
+ >>> from transformers import SiglipVisionConfig, SiglipVisionModel
82
+ >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
83
+ >>> configuration = SiglipVisionConfig()
84
+ >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
85
+ >>> model = SiglipVisionModel(configuration)
86
+ >>> # Accessing the model configuration
87
+ >>> configuration = model.config
88
+ ```"""
89
+
90
+ model_type = "siglip_vision_model"
91
+
92
+ def __init__(
93
+ self,
94
+ hidden_size=768,
95
+ intermediate_size=3072,
96
+ num_hidden_layers=12,
97
+ num_attention_heads=12,
98
+ num_channels=3,
99
+ image_size=224,
100
+ patch_size=16,
101
+ hidden_act="gelu_pytorch_tanh",
102
+ layer_norm_eps=1e-6,
103
+ attention_dropout=0.0,
104
+ **kwargs,
105
+ ):
106
+ super().__init__(**kwargs)
107
+
108
+ self.hidden_size = hidden_size
109
+ self.intermediate_size = intermediate_size
110
+ self.num_hidden_layers = num_hidden_layers
111
+ self.num_attention_heads = num_attention_heads
112
+ self.num_channels = num_channels
113
+ self.patch_size = patch_size
114
+ self.image_size = image_size
115
+ self.attention_dropout = attention_dropout
116
+ self.layer_norm_eps = layer_norm_eps
117
+ self.hidden_act = hidden_act
118
+
119
+ @classmethod
120
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
121
+ cls._set_token_in_kwargs(kwargs)
122
+
123
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
124
+
125
+ # get the vision config dict if we are loading from SiglipConfig
126
+ if config_dict.get("model_type") == "siglip":
127
+ config_dict = config_dict["vision_config"]
128
+
129
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
130
+ logger.warning(
131
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
132
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
133
+ )
134
+
135
+ return cls.from_dict(config_dict, **kwargs)
136
+
137
+
138
+ _CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
139
+
140
+ SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
141
+ "google/siglip-base-patch16-224",
142
+ # See all SigLIP models at https://huggingface.co/models?filter=siglip
143
+ ]
144
+
145
+ if is_flash_attn_2_available():
146
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
147
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
148
+
149
+
150
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
151
+ def _get_unpad_data(attention_mask):
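+ # returns the flattened indices of non-padded tokens, the cumulative sequence
+ # lengths (cu_seqlens) and the longest sequence length, as used by flash-attn's
+ # variable-length kernels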
152
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
153
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
154
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
155
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
156
+ return (
157
+ indices,
158
+ cu_seqlens,
159
+ max_seqlen_in_batch,
160
+ )
161
+
162
+
163
+ def _trunc_normal_(tensor, mean, std, a, b):
164
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
165
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
166
+ def norm_cdf(x):
167
+ # Computes standard normal cumulative distribution function
168
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
169
+
170
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
171
+ warnings.warn(
172
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
173
+ "The distribution of values may be incorrect.",
174
+ stacklevel=2,
175
+ )
176
+
177
+ # Values are generated by using a truncated uniform distribution and
178
+ # then using the inverse CDF for the normal distribution.
179
+ # Get upper and lower cdf values
180
+ l = norm_cdf((a - mean) / std)
181
+ u = norm_cdf((b - mean) / std)
182
+
183
+ # Uniformly fill tensor with values from [l, u], then translate to
184
+ # [2l-1, 2u-1].
185
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
186
+
187
+ # Use inverse cdf transform for normal distribution to get truncated
188
+ # standard normal
189
+ if tensor.dtype in [torch.float16, torch.bfloat16]:
190
+ # The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
191
+ og_dtype = tensor.dtype
192
+ tensor = tensor.to(torch.float32)
193
+ tensor.erfinv_()
194
+ tensor = tensor.to(og_dtype)
195
+ else:
196
+ tensor.erfinv_()
197
+
198
+ # Transform to proper mean, std
199
+ tensor.mul_(std * math.sqrt(2.0))
200
+ tensor.add_(mean)
201
+
202
+ # Clamp to ensure it's in the proper range
203
+ if tensor.dtype == torch.float16:
204
+ # The `clamp_` op is not (yet?) defined in float16+cpu
205
+ tensor = tensor.to(torch.float32)
206
+ tensor.clamp_(min=a, max=b)
207
+ tensor = tensor.to(torch.float16)
208
+ else:
209
+ tensor.clamp_(min=a, max=b)
210
+
211
+
212
+ def trunc_normal_tf_(
213
+ tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
214
+ ) -> torch.Tensor:
215
+ """Fills the input Tensor with values drawn from a truncated
216
+ normal distribution. The values are effectively drawn from the
217
+ normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)`
218
+ with values outside :math:`[a, b]` redrawn until they are within
219
+ the bounds. The method used for generating the random values works
220
+ best when :math:`a \\leq \text{mean} \\leq b`.
221
+ NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
222
+ bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
223
+ and the result is subsequently scaled and shifted by the mean and std args.
224
+ Args:
225
+ tensor: an n-dimensional `torch.Tensor`
226
+ mean: the mean of the normal distribution
227
+ std: the standard deviation of the normal distribution
228
+ a: the minimum cutoff value
229
+ b: the maximum cutoff value
230
+ """
231
+ with torch.no_grad():
232
+ _trunc_normal_(tensor, 0, 1.0, a, b)
233
+ tensor.mul_(std).add_(mean)
234
+
235
+
236
+ def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
237
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
238
+ if mode == "fan_in":
239
+ denom = fan_in
240
+ elif mode == "fan_out":
241
+ denom = fan_out
242
+ elif mode == "fan_avg":
243
+ denom = (fan_in + fan_out) / 2
244
+
245
+ variance = scale / denom
246
+
247
+ if distribution == "truncated_normal":
248
+ # constant is stddev of standard normal truncated to (-2, 2)
249
+ trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
250
+ elif distribution == "normal":
251
+ with torch.no_grad():
252
+ tensor.normal_(std=math.sqrt(variance))
253
+ elif distribution == "uniform":
254
+ bound = math.sqrt(3 * variance)
255
+ with torch.no_grad():
256
+ tensor.uniform_(-bound, bound)
257
+ else:
258
+ raise ValueError(f"invalid distribution {distribution}")
259
+
260
+
261
+ def lecun_normal_(tensor):
262
+ variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
263
+
264
+
265
+ def default_flax_embed_init(tensor):
266
+ variance_scaling_(tensor, mode="fan_in", distribution="normal")
267
+
268
+
269
+ @dataclass
270
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
271
+ class SiglipVisionModelOutput(ModelOutput):
272
+ """
273
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
274
+ Args:
275
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
276
+ The image embeddings obtained by applying the projection layer to the pooler_output.
277
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
278
+ Sequence of hidden-states at the output of the last layer of the model.
279
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
280
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
281
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
282
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
283
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
284
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
285
+ sequence_length)`.
286
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
287
+ heads.
288
+ """
289
+
290
+ image_embeds: Optional[torch.FloatTensor] = None
291
+ last_hidden_state: torch.FloatTensor = None
292
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
293
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
294
+
295
+
296
+ class SiglipVisionEmbeddings(nn.Module):
297
+ def __init__(self, config: SiglipVisionConfig):
298
+ super().__init__()
299
+ self.config = config
300
+ self.embed_dim = config.hidden_size
301
+ self.image_size = config.image_size
302
+ self.patch_size = config.patch_size
303
+
304
+ self.patch_embedding = nn.Conv2d(
305
+ in_channels=config.num_channels,
306
+ out_channels=self.embed_dim,
307
+ kernel_size=self.patch_size,
308
+ stride=self.patch_size,
309
+ padding="valid",
310
+ )
311
+
312
+ self.num_patches_per_side = self.image_size // self.patch_size
313
+ self.num_patches = self.num_patches_per_side**2
314
+ self.num_positions = self.num_patches
315
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
316
+
317
+ def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor, tgt_sizes: Optional[torch.IntTensor]=None) -> torch.Tensor:
318
+ batch_size = pixel_values.size(0)
319
+
320
+ patch_embeds = self.patch_embedding(pixel_values)
321
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
322
+
323
+ max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
324
+ max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
325
+ boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
326
+ position_ids = torch.full(
327
+ size=(
328
+ batch_size,
329
+ max_nb_patches_h * max_nb_patches_w,
330
+ ),
331
+ fill_value=0,
332
+ )
333
+
334
+ for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
335
+ if tgt_sizes is not None:
336
+ nb_patches_h = tgt_sizes[batch_idx][0]
337
+ nb_patches_w = tgt_sizes[batch_idx][1]
338
+ else:
339
+ nb_patches_h = p_attn_mask[:, 0].sum()
340
+ nb_patches_w = p_attn_mask[0].sum()
341
+
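+ # bucketize each patch's fractional (h, w) position onto the fixed pretrained grid
+ # of num_patches_per_side buckets per axis, so images of arbitrary size and aspect
+ # ratio reuse the learned position embeddings without interpolation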
342
+ fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
343
+ fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
344
+
345
+ bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
346
+ bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
347
+
348
+ pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
349
+ position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
350
+
351
+ position_ids = position_ids.to(self.position_embedding.weight.device)
352
+
353
+ embeddings = embeddings + self.position_embedding(position_ids)
354
+ return embeddings
355
+
356
+
357
+ class SiglipAttention(nn.Module):
358
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
359
+
360
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
361
+ def __init__(self, config):
362
+ super().__init__()
363
+ self.config = config
364
+ self.embed_dim = config.hidden_size
365
+ self.num_heads = config.num_attention_heads
366
+ self.head_dim = self.embed_dim // self.num_heads
367
+ if self.head_dim * self.num_heads != self.embed_dim:
368
+ raise ValueError(
369
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
370
+ f" {self.num_heads})."
371
+ )
372
+ self.scale = self.head_dim**-0.5
373
+ self.dropout = config.attention_dropout
374
+
375
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
376
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
377
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
378
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
379
+
380
+ def forward(
381
+ self,
382
+ hidden_states: torch.Tensor,
383
+ attention_mask: Optional[torch.Tensor] = None,
384
+ output_attentions: Optional[bool] = False,
385
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
386
+ """Input shape: Batch x Time x Channel"""
387
+
388
+ batch_size, q_len, _ = hidden_states.size()
389
+
390
+ query_states = self.q_proj(hidden_states)
391
+ key_states = self.k_proj(hidden_states)
392
+ value_states = self.v_proj(hidden_states)
393
+
394
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
395
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
396
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
397
+
398
+ k_v_seq_len = key_states.shape[-2]
399
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
400
+
401
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
402
+ raise ValueError(
403
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
404
+ f" {attn_weights.size()}"
405
+ )
406
+
407
+ if attention_mask is not None:
408
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
409
+ raise ValueError(
410
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
411
+ )
412
+ attn_weights = attn_weights + attention_mask
413
+
414
+ # upcast attention to fp32
415
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
416
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
417
+ attn_output = torch.matmul(attn_weights, value_states)
418
+
419
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
420
+ raise ValueError(
421
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
422
+ f" {attn_output.size()}"
423
+ )
424
+
425
+ attn_output = attn_output.transpose(1, 2).contiguous()
426
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
427
+
428
+ attn_output = self.out_proj(attn_output)
429
+
430
+ return attn_output, attn_weights
431
+
432
+
433
+ class SiglipFlashAttention2(SiglipAttention):
434
+ """
435
+ Siglip flash attention module. This module inherits from `SiglipAttention` as the weights of the module stay
436
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
437
+ flash attention and deal with padding tokens in case the input contains any of them.
438
+ """
439
+
440
+ def __init__(self, *args, **kwargs):
441
+ super().__init__(*args, **kwargs)
442
+ self.is_causal = False # Hack to make sure we don't use a causal mask
443
+
444
+ def forward(
445
+ self,
446
+ hidden_states: torch.Tensor,
447
+ attention_mask: Optional[torch.LongTensor] = None,
448
+ position_ids: Optional[torch.LongTensor] = None,
449
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
450
+ output_attentions: bool = False,
451
+ use_cache: bool = False,
452
+ **kwargs,
453
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
454
+ output_attentions = False
455
+
456
+ bsz, q_len, _ = hidden_states.size()
457
+
458
+ query_states = self.q_proj(hidden_states)
459
+ key_states = self.k_proj(hidden_states)
460
+ value_states = self.v_proj(hidden_states)
461
+
462
+ # Flash attention requires the input to have the shape
463
+ # batch_size x seq_length x num_heads x head_dim
464
+ # therefore we just need to keep the original shape
465
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
466
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
467
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
468
+
469
+ kv_seq_len = key_states.shape[-2]
470
+ if past_key_value is not None:
471
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
472
+ # cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
473
+ # query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
474
+
475
+ # if past_key_value is not None:
476
+ # cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
477
+ # key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
478
+
479
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
480
+ # to be able to avoid many of these transpose/reshape/view.
481
+ query_states = query_states.transpose(1, 2)
482
+ key_states = key_states.transpose(1, 2)
483
+ value_states = value_states.transpose(1, 2)
484
+
485
+ dropout_rate = self.dropout if self.training else 0.0
486
+
487
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
488
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
489
+ # cast them back in the correct dtype just to be sure everything works as expected.
490
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
491
+ # in fp32. (LlamaRMSNorm handles it correctly)
492
+
493
+ input_dtype = query_states.dtype
494
+ if input_dtype == torch.float32:
495
+ if torch.is_autocast_enabled():
496
+ target_dtype = torch.get_autocast_gpu_dtype()
497
+ # Handle the case where the model is quantized
498
+ elif hasattr(self.config, "_pre_quantization_dtype"):
499
+ target_dtype = self.config._pre_quantization_dtype
500
+ else:
501
+ target_dtype = self.q_proj.weight.dtype
502
+
503
+ logger.warning_once(
504
+ "The input hidden states seems to be silently casted in float32, this might be related to the fact"
505
+ " you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
506
+ f" {target_dtype}."
507
+ )
508
+
509
+ query_states = query_states.to(target_dtype)
510
+ key_states = key_states.to(target_dtype)
511
+ value_states = value_states.to(target_dtype)
512
+
513
+ attn_output = self._flash_attention_forward(
514
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
515
+ )
516
+
517
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
518
+ attn_output = self.out_proj(attn_output)
519
+
520
+ if not output_attentions:
521
+ attn_weights = None
522
+
523
+ return attn_output, attn_weights
524
+
525
+ def _flash_attention_forward(
526
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
527
+ ):
528
+ """
529
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
530
+ first unpad the input, then computes the attention scores and pad the final attention scores.
531
+ Args:
532
+ query_states (`torch.Tensor`):
533
+ Input query states to be passed to Flash Attention API
534
+ key_states (`torch.Tensor`):
535
+ Input key states to be passed to Flash Attention API
536
+ value_states (`torch.Tensor`):
537
+ Input value states to be passed to Flash Attention API
538
+ attention_mask (`torch.Tensor`):
539
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
540
+ position of padding tokens and 1 for the position of non-padding tokens.
541
+ dropout (`float`, *optional*):
542
+ Attention dropout
543
+ softmax_scale (`float`, *optional*):
544
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
545
+ """
546
+
547
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
548
+ causal = self.is_causal and query_length != 1
549
+
550
+ # Contains at least one padding token in the sequence
551
+ if attention_mask is not None:
552
+ batch_size = query_states.shape[0]
553
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
554
+ query_states, key_states, value_states, attention_mask, query_length
555
+ )
556
+
557
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
558
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
559
+
560
+ attn_output_unpad = flash_attn_varlen_func(
561
+ query_states,
562
+ key_states,
563
+ value_states,
564
+ cu_seqlens_q=cu_seqlens_q,
565
+ cu_seqlens_k=cu_seqlens_k,
566
+ max_seqlen_q=max_seqlen_in_batch_q,
567
+ max_seqlen_k=max_seqlen_in_batch_k,
568
+ dropout_p=dropout,
569
+ softmax_scale=softmax_scale,
570
+ causal=causal,
571
+ )
572
+
573
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
574
+ else:
575
+ attn_output = flash_attn_func(
576
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
577
+ )
578
+
579
+ return attn_output
580
+
581
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
582
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
583
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
584
+
585
+ key_layer = index_first_axis(
586
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
587
+ )
588
+ value_layer = index_first_axis(
589
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
590
+ )
591
+ if query_length == kv_seq_len:
592
+ query_layer = index_first_axis(
593
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
594
+ )
595
+ cu_seqlens_q = cu_seqlens_k
596
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
597
+ indices_q = indices_k
598
+ elif query_length == 1:
599
+ max_seqlen_in_batch_q = 1
600
+ cu_seqlens_q = torch.arange(
601
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
602
+ ) # There is a memcpy here, that is very bad.
603
+ indices_q = cu_seqlens_q[:-1]
604
+ query_layer = query_layer.squeeze(1)
605
+ else:
606
+ # The -q_len: slice assumes left padding.
607
+ attention_mask = attention_mask[:, -query_length:]
608
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
609
+
610
+ return (
611
+ query_layer,
612
+ key_layer,
613
+ value_layer,
614
+ indices_q,
615
+ (cu_seqlens_q, cu_seqlens_k),
616
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
617
+ )
618
+
619
+
620
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
621
+ class SiglipMLP(nn.Module):
622
+ def __init__(self, config):
623
+ super().__init__()
624
+ self.config = config
625
+ self.activation_fn = ACT2FN[config.hidden_act]
626
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
627
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
628
+
629
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
630
+ hidden_states = self.fc1(hidden_states)
631
+ hidden_states = self.activation_fn(hidden_states)
632
+ hidden_states = self.fc2(hidden_states)
633
+ return hidden_states
634
+
635
+
636
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
637
+ class SiglipEncoderLayer(nn.Module):
638
+ def __init__(self, config: SiglipVisionConfig):
639
+ super().__init__()
640
+ self.embed_dim = config.hidden_size
641
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
642
+ self.self_attn = (
643
+ SiglipAttention(config)
644
+ if not self._use_flash_attention_2
645
+ else SiglipFlashAttention2(config)
646
+ )
647
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
648
+ self.mlp = SiglipMLP(config)
649
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
650
+
651
+ def forward(
652
+ self,
653
+ hidden_states: torch.Tensor,
654
+ attention_mask: torch.Tensor,
655
+ output_attentions: Optional[bool] = False,
656
+ ) -> Tuple[torch.FloatTensor]:
657
+ """
658
+ Args:
659
+ hidden_states (`torch.FloatTensor`):
660
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
661
+ attention_mask (`torch.FloatTensor`):
662
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
663
+ output_attentions (`bool`, *optional*, defaults to `False`):
664
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
665
+ returned tensors for more detail.
666
+ """
667
+ residual = hidden_states
668
+
669
+ hidden_states = self.layer_norm1(hidden_states)
670
+ hidden_states, attn_weights = self.self_attn(
671
+ hidden_states=hidden_states,
672
+ attention_mask=attention_mask,
673
+ output_attentions=output_attentions,
674
+ )
675
+ hidden_states = residual + hidden_states
676
+
677
+ residual = hidden_states
678
+ hidden_states = self.layer_norm2(hidden_states)
679
+ hidden_states = self.mlp(hidden_states)
680
+ hidden_states = residual + hidden_states
681
+
682
+ outputs = (hidden_states,)
683
+
684
+ if output_attentions:
685
+ outputs += (attn_weights,)
686
+
687
+ return outputs
688
+
689
+
690
+ class SiglipPreTrainedModel(PreTrainedModel):
691
+ """
692
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
693
+ models.
694
+ """
695
+
696
+ config_class = SiglipVisionConfig
697
+ base_model_prefix = "siglip"
698
+ supports_gradient_checkpointing = True
699
+
700
+ def _init_weights(self, module):
701
+ """Initialize the weights"""
702
+
703
+ if isinstance(module, SiglipVisionEmbeddings):
704
+ width = self.config.hidden_size
705
+ nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
706
+ elif isinstance(module, nn.Embedding):
707
+ default_flax_embed_init(module.weight)
708
+ elif isinstance(module, SiglipAttention):
709
+ nn.init.normal_(module.q_proj.weight)
710
+ nn.init.normal_(module.k_proj.weight)
711
+ nn.init.normal_(module.v_proj.weight)
712
+ nn.init.normal_(module.out_proj.weight)
713
+ nn.init.zeros_(module.q_proj.bias)
714
+ nn.init.zeros_(module.k_proj.bias)
715
+ nn.init.zeros_(module.v_proj.bias)
716
+ nn.init.zeros_(module.out_proj.bias)
717
+ elif isinstance(module, SiglipMLP):
718
+ nn.init.normal_(module.fc1.weight)
719
+ nn.init.normal_(module.fc2.weight)
720
+ nn.init.normal_(module.fc1.bias, std=1e-6)
721
+ nn.init.normal_(module.fc2.bias, std=1e-6)
722
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
723
+ lecun_normal_(module.weight)
724
+ if module.bias is not None:
725
+ nn.init.zeros_(module.bias)
726
+ elif isinstance(module, nn.LayerNorm):
727
+ module.bias.data.zero_()
728
+ module.weight.data.fill_(1.0)
729
+
730
+
731
+ SIGLIP_START_DOCSTRING = r"""
732
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
733
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
734
+ etc.)
735
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
736
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
737
+ and behavior.
738
+ Parameters:
739
+ config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
740
+ Initializing with a config file does not load the weights associated with the model, only the
741
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
742
+ """
743
+
744
+
745
+ SIGLIP_VISION_INPUTS_DOCSTRING = r"""
746
+ Args:
747
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
748
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
749
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
750
+ output_attentions (`bool`, *optional*):
751
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
752
+ tensors for more detail.
753
+ output_hidden_states (`bool`, *optional*):
754
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
755
+ more detail.
756
+ return_dict (`bool`, *optional*):
757
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
758
+ """
759
+
760
+
761
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
762
+ class SiglipEncoder(nn.Module):
763
+ """
764
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
765
+ [`SiglipEncoderLayer`].
766
+ Args:
767
+ config: SiglipConfig
768
+ """
769
+
770
+ def __init__(self, config: SiglipVisionConfig):
771
+ super().__init__()
772
+ self.config = config
773
+ self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
774
+ self.gradient_checkpointing = False
775
+
776
+ # Ignore copy
777
+ def forward(
778
+ self,
779
+ inputs_embeds,
780
+ attention_mask: Optional[torch.Tensor] = None,
781
+ output_attentions: Optional[bool] = None,
782
+ output_hidden_states: Optional[bool] = None,
783
+ return_dict: Optional[bool] = None,
784
+ ) -> Union[Tuple, BaseModelOutput]:
785
+ r"""
786
+ Args:
787
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
788
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
789
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
790
+ than the model's internal embedding lookup matrix.
791
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
792
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
793
+ - 1 for tokens that are **not masked**,
794
+ - 0 for tokens that are **masked**.
795
+ [What are attention masks?](../glossary#attention-mask)
796
+ output_attentions (`bool`, *optional*):
797
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
798
+ returned tensors for more detail.
799
+ output_hidden_states (`bool`, *optional*):
800
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
801
+ for more detail.
802
+ return_dict (`bool`, *optional*):
803
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
804
+ """
805
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
806
+ output_hidden_states = (
807
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
808
+ )
809
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
810
+
811
+ encoder_states = () if output_hidden_states else None
812
+ all_attentions = () if output_attentions else None
813
+
814
+ hidden_states = inputs_embeds
815
+ for encoder_layer in self.layers:
816
+ if output_hidden_states:
817
+ encoder_states = encoder_states + (hidden_states,)
818
+ if self.gradient_checkpointing and self.training:
819
+ layer_outputs = self._gradient_checkpointing_func(
820
+ encoder_layer.__call__,
821
+ hidden_states,
822
+ attention_mask,
823
+ output_attentions,
824
+ )
825
+ else:
826
+ layer_outputs = encoder_layer(
827
+ hidden_states,
828
+ attention_mask,
829
+ output_attentions=output_attentions,
830
+ )
831
+
832
+ hidden_states = layer_outputs[0]
833
+
834
+ if output_attentions:
835
+ all_attentions = all_attentions + (layer_outputs[1],)
836
+
837
+ if output_hidden_states:
838
+ encoder_states = encoder_states + (hidden_states,)
839
+
840
+ if not return_dict:
841
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
842
+ return BaseModelOutput(
843
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
844
+ )
845
+
846
+ @add_start_docstrings(
847
+ """The vision model from SigLIP without any head or projection on top.""",
848
+ SIGLIP_START_DOCSTRING
849
+ )
850
+ class SiglipVisionTransformer(SiglipPreTrainedModel):
851
+ config_class = SiglipVisionConfig
852
+ main_input_name = "pixel_values"
853
+ _supports_flash_attn_2 = True
854
+
855
+ def __init__(self, config: SiglipVisionConfig):
856
+ super().__init__(config)
857
+ self.config = config
858
+ embed_dim = config.hidden_size
859
+
860
+ self.embeddings = SiglipVisionEmbeddings(config)
861
+ self.encoder = SiglipEncoder(config)
862
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
863
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
864
+
865
+ # Initialize weights and apply final processing
866
+ self.post_init()
867
+
868
+ def get_input_embeddings(self) -> nn.Module:
869
+ return self.embeddings.patch_embedding
870
+
871
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
872
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
873
+ def forward(
874
+ self,
875
+ pixel_values,
876
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
877
+ tgt_sizes: Optional[torch.IntTensor] = None,
878
+ output_attentions: Optional[bool] = None,
879
+ output_hidden_states: Optional[bool] = None,
880
+ return_dict: Optional[bool] = None,
881
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
882
+ r"""
883
+ Returns:
884
+ """
885
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
886
+ output_hidden_states = (
887
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
888
+ )
889
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
890
+
891
+ batch_size = pixel_values.size(0)
892
+ if patch_attention_mask is None:
893
+ patch_attention_mask = torch.ones(
894
+ size=(
895
+ batch_size,
896
+ pixel_values.size(2) // self.config.patch_size,
897
+ pixel_values.size(3) // self.config.patch_size,
898
+ ),
899
+ dtype=torch.bool,
900
+ device=pixel_values.device,
901
+ )
902
+
903
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes)
904
+
905
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
906
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive
907
+ # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
908
+ # we avoid passing the attention_mask, which is equivalent to attending to the full sequence
909
+ if not torch.any(~patch_attention_mask):
910
+ attention_mask=None
911
+ else:
912
+ attention_mask = (
913
+ _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
914
+ if not self._use_flash_attention_2
915
+ else patch_attention_mask
916
+ )
917
+
918
+ encoder_outputs = self.encoder(
919
+ inputs_embeds=hidden_states,
920
+ attention_mask=attention_mask,
921
+ output_attentions=output_attentions,
922
+ output_hidden_states=output_hidden_states,
923
+ return_dict=return_dict,
924
+ )
925
+
926
+ last_hidden_state = encoder_outputs[0]
927
+ last_hidden_state = self.post_layernorm(last_hidden_state)
928
+
929
+ if not return_dict:
930
+ return (last_hidden_state, None) + encoder_outputs[1:]
931
+
932
+ return BaseModelOutputWithPooling(
933
+ last_hidden_state=last_hidden_state,
934
+ pooler_output=None,
935
+ hidden_states=encoder_outputs.hidden_states,
936
+ attentions=encoder_outputs.attentions,
937
+ )
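
Editor's note, not part of the committed modeling_navit_siglip.py above: the bucketize step in SiglipVisionEmbeddings.forward is what lets a variable-sized patch grid reuse the fixed position-embedding table. A minimal, self-contained sketch of that mapping, with illustrative sizes (a 32x32 table and a 20x14 input grid, not values read from this checkpoint's config):

import torch

num_patches_per_side = 32            # side of the position table (illustrative)
nb_patches_h, nb_patches_w = 20, 14  # patch grid of one resized image (illustrative)

# Bucket fractional patch coordinates into the fixed table's rows/columns,
# mirroring the per-sample loop in SiglipVisionEmbeddings.forward.
boundaries = torch.arange(1 / num_patches_per_side, 1.0, 1 / num_patches_per_side)
frac_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
frac_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
bucket_h = torch.bucketize(frac_h, boundaries, right=True)
bucket_w = torch.bucketize(frac_w, boundaries, right=True)

# Flattened indices into a (num_patches_per_side ** 2)-entry nn.Embedding.
pos_ids = (bucket_h[:, None] * num_patches_per_side + bucket_w).flatten()
print(pos_ids.shape)       # torch.Size([280]) == 20 * 14
print(int(pos_ids.max()))  # < 32 * 32 = 1024
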
checkpoint-4800/resampler.py ADDED
@@ -0,0 +1,782 @@
1
+ from functools import partial
2
+ from typing import Optional, Tuple
3
+ import numpy as np
4
+ import warnings
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch import Tensor
9
+ import torch.nn.functional as F
10
+ from torch.nn.functional import *
11
+ from torch.nn.modules.activation import *
12
+ from torch.nn.init import trunc_normal_, constant_, xavier_normal_, xavier_uniform_
13
+
14
+ from transformers.integrations import is_deepspeed_zero3_enabled
15
+
16
+ def get_2d_sincos_pos_embed(embed_dim, image_size):
17
+ """
18
+ image_size: image_size or (image_height, image_width)
19
+ return:
20
+ pos_embed: [image_height, image_width, embed_dim]
21
+ """
22
+ if isinstance(image_size, int):
23
+ grid_h_size, grid_w_size = image_size, image_size
24
+ else:
25
+ grid_h_size, grid_w_size = image_size[0], image_size[1]
26
+
27
+ grid_h = np.arange(grid_h_size, dtype=np.float32)
28
+ grid_w = np.arange(grid_w_size, dtype=np.float32)
29
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
30
+ grid = np.stack(grid, axis=0)
31
+
32
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
33
+ return pos_embed
34
+
35
+
36
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
37
+ assert embed_dim % 2 == 0
38
+
39
+ # use half of dimensions to encode grid_h
40
+ emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0]) # (H, W, D/2)
41
+ emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1]) # (H, W, D/2)
42
+
43
+ emb = np.concatenate([emb_h, emb_w], axis=-1) # (H, W, D)
44
+ return emb
45
+
46
+
47
+ def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
48
+ """
49
+ embed_dim: output dimension for each position
50
+ pos: a list of positions to be encoded: size (H, W)
51
+ out: (H, W, D)
52
+ """
53
+ assert embed_dim % 2 == 0
54
+ omega = np.arange(embed_dim // 2, dtype=np.float32)
55
+ omega /= embed_dim / 2.
56
+ omega = 1. / 10000 ** omega # (D/2,)
57
+
58
+ out = np.einsum('hw,d->hwd', pos, omega) # (H, W, D/2), outer product
59
+
60
+ emb_sin = np.sin(out) # (H, W, D/2)
61
+ emb_cos = np.cos(out) # (H, W, D/2)
62
+
63
+ emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (H, W, D)
64
+ return emb
65
+
66
+
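
Editor's note, not part of the committed resampler.py: a quick sanity check of the sin-cos helpers above, assuming they are in scope (e.g. imported from this file). The table is laid out as (H, W, D): the first D/2 channels encode one grid axis, the last D/2 the other, and every value is a sine or cosine, so it stays in [-1, 1].

pos = get_2d_sincos_pos_embed(embed_dim=128, image_size=(70, 70))
print(pos.shape)                                          # (70, 70, 128)
print(float(pos.min()) >= -1.0, float(pos.max()) <= 1.0)  # True True
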
67
+ class Resampler(nn.Module):
68
+ """
69
+ A 2D perceiver-resampler network with one cross attention layers by
70
+ given learnable queries and 2d sincos pos_emb
71
+ Outputs:
72
+ A tensor with the shape of (batch_size, num_queries, embed_dim)
73
+ """
74
+
75
+ def __init__(
76
+ self,
77
+ num_queries,
78
+ embed_dim,
79
+ num_heads,
80
+ kv_dim=None,
81
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
82
+ adaptive=False,
83
+ max_size=(70, 70),
84
+ ):
85
+ super().__init__()
86
+ self.num_queries = num_queries
87
+ self.embed_dim = embed_dim
88
+ self.num_heads = num_heads
89
+ self.adaptive = adaptive
90
+ self.max_size = max_size
91
+
92
+ self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
93
+
94
+ if kv_dim is not None and kv_dim != embed_dim:
95
+ self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
96
+ else:
97
+ self.kv_proj = nn.Identity()
98
+
99
+ self.attn = MultiheadAttention(embed_dim, num_heads)
100
+ self.ln_q = norm_layer(embed_dim)
101
+ self.ln_kv = norm_layer(embed_dim)
102
+
103
+ self.ln_post = norm_layer(embed_dim)
104
+ self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
105
+
106
+ self._set_2d_pos_cache(self.max_size)
107
+
108
+ def _set_2d_pos_cache(self, max_size, device='cpu'):
109
+ if is_deepspeed_zero3_enabled():
110
+ device='cuda'
111
+ pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
112
+ self.register_buffer("pos_embed", pos_embed, persistent=False)
113
+
114
+ def _adjust_pos_cache(self, tgt_sizes, device):
115
+ max_h = torch.max(tgt_sizes[:, 0])
116
+ max_w = torch.max(tgt_sizes[:, 1])
117
+ if max_h > self.max_size[0] or max_w > self.max_size[1]:
118
+ self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
119
+ self._set_2d_pos_cache(self.max_size, device)
120
+
121
+ def _init_weights(self, m):
122
+ if isinstance(m, nn.Linear):
123
+ trunc_normal_(m.weight, std=.02)
124
+ if isinstance(m, nn.Linear) and m.bias is not None:
125
+ nn.init.constant_(m.bias, 0)
126
+ elif isinstance(m, nn.LayerNorm):
127
+ nn.init.constant_(m.bias, 0)
128
+ nn.init.constant_(m.weight, 1.0)
129
+
130
+ def forward(self, x, tgt_sizes=None):
131
+ assert x.shape[0] == tgt_sizes.shape[0]
132
+ bs = x.shape[0]
133
+
134
+ device = x.device
135
+ dtype = x.dtype
136
+
137
+ patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]
138
+
139
+ self._adjust_pos_cache(tgt_sizes, device=device)
140
+
141
+ max_patch_len = torch.max(patch_len)
142
+ key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)
143
+
144
+ pos_embed = []
145
+ for i in range(bs):
146
+ tgt_h, tgt_w = tgt_sizes[i]
147
+ pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype)) # patches * D
148
+ key_padding_mask[i, patch_len[i]:] = True
149
+
150
+ pos_embed = torch.nn.utils.rnn.pad_sequence(
151
+ pos_embed, batch_first=True, padding_value=0.0).permute(1, 0, 2) # BLD => L * B * D
152
+
153
+ x = self.kv_proj(x) # B * L * D
154
+ x = self.ln_kv(x).permute(1, 0, 2) # L * B * D
155
+
156
+ q = self.ln_q(self.query) # Q * D
157
+
158
+ out = self.attn(
159
+ self._repeat(q, bs), # Q * B * D
160
+ x + pos_embed, # L * B * D + L * B * D
161
+ x,
162
+ key_padding_mask=key_padding_mask)[0]
163
+ # out: Q * B * D
164
+ x = out.permute(1, 0, 2) # B * Q * D
165
+
166
+ x = self.ln_post(x)
167
+ x = x @ self.proj
168
+ return x
169
+
170
+ def _repeat(self, query, N: int):
171
+ return query.unsqueeze(1).repeat(1, N, 1)
172
+
173
+
174
+ class MultiheadAttention(nn.MultiheadAttention):
175
+ def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
176
+ add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
177
+ super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)
178
+
179
+ # rewrite the out_proj layer with nn.Linear
180
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
181
+
182
+ def forward(
183
+ self,
184
+ query: Tensor,
185
+ key: Tensor,
186
+ value: Tensor,
187
+ key_padding_mask: Optional[Tensor] = None,
188
+ need_weights: bool = True,
189
+ attn_mask: Optional[Tensor] = None,
190
+ average_attn_weights: bool = True,
191
+ is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
192
+ why_not_fast_path = ''
193
+ if ((attn_mask is not None and torch.is_floating_point(attn_mask))
194
+ or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
195
+ why_not_fast_path = "floating-point masks are not supported for fast path."
196
+
197
+ is_batched = query.dim() == 3
198
+
199
+ key_padding_mask = _canonical_mask(
200
+ mask=key_padding_mask,
201
+ mask_name="key_padding_mask",
202
+ other_type=F._none_or_dtype(attn_mask),
203
+ other_name="attn_mask",
204
+ target_type=query.dtype
205
+ )
206
+
207
+ attn_mask = _canonical_mask(
208
+ mask=attn_mask,
209
+ mask_name="attn_mask",
210
+ other_type=None,
211
+ other_name="",
212
+ target_type=query.dtype,
213
+ check_other=False,
214
+ )
215
+
216
+
217
+ if not is_batched:
218
+ why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
219
+ elif query is not key or key is not value:
220
+ # When lifting this restriction, don't forget to either
221
+ # enforce that the dtypes all match or test cases where
222
+ # they don't!
223
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
224
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
225
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
226
+ elif self.in_proj_weight is None:
227
+ why_not_fast_path = "in_proj_weight was None"
228
+ elif query.dtype != self.in_proj_weight.dtype:
229
+ # this case will fail anyway, but at least they'll get a useful error message.
230
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
231
+ elif self.training:
232
+ why_not_fast_path = "training is enabled"
233
+ elif (self.num_heads % 2) != 0:
234
+ why_not_fast_path = "self.num_heads is not even"
235
+ elif not self.batch_first:
236
+ why_not_fast_path = "batch_first was not True"
237
+ elif self.bias_k is not None:
238
+ why_not_fast_path = "self.bias_k was not None"
239
+ elif self.bias_v is not None:
240
+ why_not_fast_path = "self.bias_v was not None"
241
+ elif self.add_zero_attn:
242
+ why_not_fast_path = "add_zero_attn was enabled"
243
+ elif not self._qkv_same_embed_dim:
244
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
245
+ elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
246
+ why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
247
+ is not supported with NestedTensor input"
248
+ elif torch.is_autocast_enabled():
249
+ why_not_fast_path = "autocast is enabled"
250
+
251
+ if not why_not_fast_path:
252
+ tensor_args = (
253
+ query,
254
+ key,
255
+ value,
256
+ self.in_proj_weight,
257
+ self.in_proj_bias,
258
+ self.out_proj.weight,
259
+ self.out_proj.bias,
260
+ )
261
+ # We have to use list comprehensions below because TorchScript does not support
262
+ # generator expressions.
263
+ if torch.overrides.has_torch_function(tensor_args):
264
+ why_not_fast_path = "some Tensor argument has_torch_function"
265
+ elif _is_make_fx_tracing():
266
+ why_not_fast_path = "we are running make_fx tracing"
267
+ elif not all(_check_arg_device(x) for x in tensor_args):
268
+ why_not_fast_path = ("some Tensor argument's device is neither one of "
269
+ f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
270
+ elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
271
+ why_not_fast_path = ("grad is enabled and at least one of query or the "
272
+ "input/output projection weights or biases requires_grad")
273
+ if not why_not_fast_path:
274
+ merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
275
+
276
+ if self.in_proj_bias is not None and self.in_proj_weight is not None:
277
+ return torch._native_multi_head_attention(
278
+ query,
279
+ key,
280
+ value,
281
+ self.embed_dim,
282
+ self.num_heads,
283
+ self.in_proj_weight,
284
+ self.in_proj_bias,
285
+ self.out_proj.weight,
286
+ self.out_proj.bias,
287
+ merged_mask,
288
+ need_weights,
289
+ average_attn_weights,
290
+ mask_type)
291
+
292
+ any_nested = query.is_nested or key.is_nested or value.is_nested
293
+ assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
294
+ f"The fast path was not hit because {why_not_fast_path}")
295
+
296
+ if self.batch_first and is_batched:
297
+ # make sure that the transpose op does not affect the "is" property
298
+ if key is value:
299
+ if query is key:
300
+ query = key = value = query.transpose(1, 0)
301
+ else:
302
+ query, key = (x.transpose(1, 0) for x in (query, key))
303
+ value = key
304
+ else:
305
+ query, key, value = (x.transpose(1, 0) for x in (query, key, value))
306
+
307
+ if not self._qkv_same_embed_dim:
308
+ attn_output, attn_output_weights = self.multi_head_attention_forward(
309
+ query, key, value, self.embed_dim, self.num_heads,
310
+ self.in_proj_weight, self.in_proj_bias,
311
+ self.bias_k, self.bias_v, self.add_zero_attn,
312
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
313
+ training=self.training,
314
+ key_padding_mask=key_padding_mask, need_weights=need_weights,
315
+ attn_mask=attn_mask,
316
+ use_separate_proj_weight=True,
317
+ q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
318
+ v_proj_weight=self.v_proj_weight,
319
+ average_attn_weights=average_attn_weights,
320
+ is_causal=is_causal)
321
+ else:
322
+ attn_output, attn_output_weights = self.multi_head_attention_forward(
323
+ query, key, value, self.embed_dim, self.num_heads,
324
+ self.in_proj_weight, self.in_proj_bias,
325
+ self.bias_k, self.bias_v, self.add_zero_attn,
326
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
327
+ training=self.training,
328
+ key_padding_mask=key_padding_mask,
329
+ need_weights=need_weights,
330
+ attn_mask=attn_mask,
331
+ average_attn_weights=average_attn_weights,
332
+ is_causal=is_causal)
333
+ if self.batch_first and is_batched:
334
+ return attn_output.transpose(1, 0), attn_output_weights
335
+ else:
336
+ return attn_output, attn_output_weights
337
+
338
+ def multi_head_attention_forward(
339
+ self,
340
+ query: Tensor,
341
+ key: Tensor,
342
+ value: Tensor,
343
+ embed_dim_to_check: int,
344
+ num_heads: int,
345
+ in_proj_weight: Optional[Tensor],
346
+ in_proj_bias: Optional[Tensor],
347
+ bias_k: Optional[Tensor],
348
+ bias_v: Optional[Tensor],
349
+ add_zero_attn: bool,
350
+ dropout_p: float,
351
+ out_proj_weight: Tensor,
352
+ out_proj_bias: Optional[Tensor],
353
+ training: bool = True,
354
+ key_padding_mask: Optional[Tensor] = None,
355
+ need_weights: bool = True,
356
+ attn_mask: Optional[Tensor] = None,
357
+ use_separate_proj_weight: bool = False,
358
+ q_proj_weight: Optional[Tensor] = None,
359
+ k_proj_weight: Optional[Tensor] = None,
360
+ v_proj_weight: Optional[Tensor] = None,
361
+ static_k: Optional[Tensor] = None,
362
+ static_v: Optional[Tensor] = None,
363
+ average_attn_weights: bool = True,
364
+ is_causal: bool = False,
365
+ ) -> Tuple[Tensor, Optional[Tensor]]:
366
+ tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
367
+
368
+ is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
369
+
370
+ # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
371
+ # is batched, run the computation and before returning squeeze the
372
+ # batch dimension so that the output doesn't carry this temporary batch dimension.
373
+ if not is_batched:
374
+ # unsqueeze if the input is unbatched
375
+ query = query.unsqueeze(1)
376
+ key = key.unsqueeze(1)
377
+ value = value.unsqueeze(1)
378
+ if key_padding_mask is not None:
379
+ key_padding_mask = key_padding_mask.unsqueeze(0)
380
+
381
+ # set up shape vars
382
+ tgt_len, bsz, embed_dim = query.shape
383
+ src_len, _, _ = key.shape
384
+
385
+ key_padding_mask = _canonical_mask(
386
+ mask=key_padding_mask,
387
+ mask_name="key_padding_mask",
388
+ other_type=_none_or_dtype(attn_mask),
389
+ other_name="attn_mask",
390
+ target_type=query.dtype
391
+ )
392
+
393
+ if is_causal and attn_mask is None:
394
+ raise RuntimeError(
395
+ "Need attn_mask if specifying the is_causal hint. "
396
+ "You may use the Transformer module method "
397
+ "`generate_square_subsequent_mask` to create this mask."
398
+ )
399
+
400
+ if is_causal and key_padding_mask is None and not need_weights:
401
+ # when we have a kpm or need weights, we need attn_mask
402
+ # Otherwise, we use the is_causal hint go as is_causal
403
+ # indicator to SDPA.
404
+ attn_mask = None
405
+ else:
406
+ attn_mask = _canonical_mask(
407
+ mask=attn_mask,
408
+ mask_name="attn_mask",
409
+ other_type=None,
410
+ other_name="",
411
+ target_type=query.dtype,
412
+ check_other=False,
413
+ )
414
+
415
+ if key_padding_mask is not None:
416
+ # We have the attn_mask, and use that to merge kpm into it.
417
+ # Turn off use of is_causal hint, as the merged mask is no
418
+ # longer causal.
419
+ is_causal = False
420
+
421
+ assert embed_dim == embed_dim_to_check, \
422
+ f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
423
+ if isinstance(embed_dim, torch.Tensor):
424
+ # embed_dim can be a tensor when JIT tracing
425
+ head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
426
+ else:
427
+ head_dim = embed_dim // num_heads
428
+ assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
429
+ if use_separate_proj_weight:
430
+ # allow MHA to have different embedding dimensions when separate projection weights are used
431
+ assert key.shape[:2] == value.shape[:2], \
432
+ f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
433
+ else:
434
+ assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
435
+
436
+ #
437
+ # compute in-projection
438
+ #
439
+ if not use_separate_proj_weight:
440
+ assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
441
+ q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
442
+ else:
443
+ assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
444
+ assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
445
+ assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
446
+ if in_proj_bias is None:
447
+ b_q = b_k = b_v = None
448
+ else:
449
+ b_q, b_k, b_v = in_proj_bias.chunk(3)
450
+ q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
451
+
452
+ # prep attention mask
453
+
454
+ if attn_mask is not None:
455
+ # ensure attn_mask's dim is 3
456
+ if attn_mask.dim() == 2:
457
+ correct_2d_size = (tgt_len, src_len)
458
+ if attn_mask.shape != correct_2d_size:
459
+ raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
460
+ attn_mask = attn_mask.unsqueeze(0)
461
+ elif attn_mask.dim() == 3:
462
+ correct_3d_size = (bsz * num_heads, tgt_len, src_len)
463
+ if attn_mask.shape != correct_3d_size:
464
+ raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
465
+ else:
466
+ raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
467
+
468
+ # add bias along batch dimension (currently second)
469
+ if bias_k is not None and bias_v is not None:
470
+ assert static_k is None, "bias cannot be added to static key."
471
+ assert static_v is None, "bias cannot be added to static value."
472
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
473
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
474
+ if attn_mask is not None:
475
+ attn_mask = pad(attn_mask, (0, 1))
476
+ if key_padding_mask is not None:
477
+ key_padding_mask = pad(key_padding_mask, (0, 1))
478
+ else:
479
+ assert bias_k is None
480
+ assert bias_v is None
481
+
482
+ #
483
+ # reshape q, k, v for multihead attention and make em batch first
484
+ #
485
+ q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
486
+ if static_k is None:
487
+ k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
488
+ else:
489
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
490
+ assert static_k.size(0) == bsz * num_heads, \
491
+ f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
492
+ assert static_k.size(2) == head_dim, \
493
+ f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
494
+ k = static_k
495
+ if static_v is None:
496
+ v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
497
+ else:
498
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
499
+ assert static_v.size(0) == bsz * num_heads, \
500
+ f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
501
+ assert static_v.size(2) == head_dim, \
502
+ f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
503
+ v = static_v
504
+
505
+ # add zero attention along batch dimension (now first)
506
+ if add_zero_attn:
507
+ zero_attn_shape = (bsz * num_heads, 1, head_dim)
508
+ k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
509
+ v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
510
+ if attn_mask is not None:
511
+ attn_mask = pad(attn_mask, (0, 1))
512
+ if key_padding_mask is not None:
513
+ key_padding_mask = pad(key_padding_mask, (0, 1))
514
+
515
+ # update source sequence length after adjustments
516
+ src_len = k.size(1)
517
+
518
+ # merge key padding and attention masks
519
+ if key_padding_mask is not None:
520
+ assert key_padding_mask.shape == (bsz, src_len), \
521
+ f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
522
+ key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
523
+ expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
524
+ if attn_mask is None:
525
+ attn_mask = key_padding_mask
526
+ else:
527
+ attn_mask = attn_mask + key_padding_mask
528
+
529
+ # adjust dropout probability
530
+ if not training:
531
+ dropout_p = 0.0
532
+
533
+ #
534
+ # (deep breath) calculate attention and out projection
535
+ #
536
+
537
+ if need_weights:
538
+ B, Nt, E = q.shape
539
+ q_scaled = q / math.sqrt(E)
540
+
541
+ assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
542
+
543
+ if attn_mask is not None:
544
+ attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
545
+ else:
546
+ attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
547
+ attn_output_weights = softmax(attn_output_weights, dim=-1)
548
+ if dropout_p > 0.0:
549
+ attn_output_weights = dropout(attn_output_weights, p=dropout_p)
550
+
551
+ attn_output = torch.bmm(attn_output_weights, v)
552
+
553
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
554
+ attn_output = self.out_proj(attn_output)
555
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
556
+
557
+ # optionally average attention weights over heads
558
+ attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
559
+ if average_attn_weights:
560
+ attn_output_weights = attn_output_weights.mean(dim=1)
561
+
562
+ if not is_batched:
563
+ # squeeze the output if input was unbatched
564
+ attn_output = attn_output.squeeze(1)
565
+ attn_output_weights = attn_output_weights.squeeze(0)
566
+ return attn_output, attn_output_weights
567
+ else:
568
+ # attn_mask can be either (L,S) or (N*num_heads, L, S)
569
+ # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
570
+ # in order to match the input for SDPA of (N, num_heads, L, S)
571
+ if attn_mask is not None:
572
+ if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
573
+ attn_mask = attn_mask.unsqueeze(0)
574
+ else:
575
+ attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
576
+
577
+ q = q.view(bsz, num_heads, tgt_len, head_dim)
578
+ k = k.view(bsz, num_heads, src_len, head_dim)
579
+ v = v.view(bsz, num_heads, src_len, head_dim)
580
+
581
+ attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
582
+ attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
583
+
584
+ attn_output = self.out_proj(attn_output)
585
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
586
+ if not is_batched:
587
+ # squeeze the output if input was unbatched
588
+ attn_output = attn_output.squeeze(1)
589
+ return attn_output, None
590
+
591
+
592
+ def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
593
+ key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
594
+ # Verifies the expected shape for `query, `key`, `value`, `key_padding_mask` and `attn_mask`
595
+ # and returns if the input is batched or not.
596
+ # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
597
+
598
+ # Shape check.
599
+ if query.dim() == 3:
600
+ # Batched Inputs
601
+ is_batched = True
602
+ assert key.dim() == 3 and value.dim() == 3, \
603
+ ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
604
+ f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
605
+ if key_padding_mask is not None:
606
+ assert key_padding_mask.dim() == 2, \
607
+ ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
608
+ f" but found {key_padding_mask.dim()}-D tensor instead")
609
+ if attn_mask is not None:
610
+ assert attn_mask.dim() in (2, 3), \
611
+ ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
612
+ f" but found {attn_mask.dim()}-D tensor instead")
613
+ elif query.dim() == 2:
614
+ # Unbatched Inputs
615
+ is_batched = False
616
+ assert key.dim() == 2 and value.dim() == 2, \
617
+ ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
618
+ f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
619
+
620
+ if key_padding_mask is not None:
621
+ assert key_padding_mask.dim() == 1, \
622
+ ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
623
+ f" but found {key_padding_mask.dim()}-D tensor instead")
624
+
625
+ if attn_mask is not None:
626
+ assert attn_mask.dim() in (2, 3), \
627
+ ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
628
+ f" but found {attn_mask.dim()}-D tensor instead")
629
+ if attn_mask.dim() == 3:
630
+ expected_shape = (num_heads, query.shape[0], key.shape[0])
631
+ assert attn_mask.shape == expected_shape, \
632
+ (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
633
+ else:
634
+ raise AssertionError(
635
+ f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
636
+
637
+ return is_batched
638
+
639
+
640
+ def _canonical_mask(
641
+ mask: Optional[Tensor],
642
+ mask_name: str,
643
+ other_type: Optional[DType],
644
+ other_name: str,
645
+ target_type: DType,
646
+ check_other: bool = True,
647
+ ) -> Optional[Tensor]:
648
+
649
+ if mask is not None:
650
+ _mask_dtype = mask.dtype
651
+ _mask_is_float = torch.is_floating_point(mask)
652
+ if _mask_dtype != torch.bool and not _mask_is_float:
653
+ raise AssertionError(
654
+ f"only bool and floating types of {mask_name} are supported")
655
+ if check_other and other_type is not None:
656
+ if _mask_dtype != other_type:
657
+ warnings.warn(
658
+ f"Support for mismatched {mask_name} and {other_name} "
659
+ "is deprecated. Use same type for both instead."
660
+ )
661
+ if not _mask_is_float:
662
+ mask = (
663
+ torch.zeros_like(mask, dtype=target_type)
664
+ .masked_fill_(mask, float("-inf"))
665
+ )
666
+ return mask
667
+
668
+
669
+ def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
670
+ if input is None:
671
+ return None
672
+ elif isinstance(input, torch.Tensor):
673
+ return input.dtype
674
+ raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
675
+
676
+ def _in_projection_packed(
677
+ q: Tensor,
678
+ k: Tensor,
679
+ v: Tensor,
680
+ w: Tensor,
681
+ b: Optional[Tensor] = None,
682
+ ) -> List[Tensor]:
683
+ r"""
684
+ Performs the in-projection step of the attention operation, using packed weights.
685
+ Output is a triple containing projection tensors for query, key and value.
686
+ Args:
687
+ q, k, v: query, key and value tensors to be projected. For self-attention,
688
+ these are typically the same tensor; for encoder-decoder attention,
689
+ k and v are typically the same tensor. (We take advantage of these
690
+ identities for performance if they are present.) Regardless, q, k and v
691
+ must share a common embedding dimension; otherwise their shapes may vary.
692
+ w: projection weights for q, k and v, packed into a single tensor. Weights
693
+ are packed along dimension 0, in q, k, v order.
694
+ b: optional projection biases for q, k and v, packed into a single tensor
695
+ in q, k, v order.
696
+ Shape:
697
+ Inputs:
698
+ - q: :math:`(..., E)` where E is the embedding dimension
699
+ - k: :math:`(..., E)` where E is the embedding dimension
700
+ - v: :math:`(..., E)` where E is the embedding dimension
701
+ - w: :math:`(E * 3, E)` where E is the embedding dimension
702
+ - b: :math:`E * 3` where E is the embedding dimension
703
+ Output:
704
+ - in output list :math:`[q', k', v']`, each output tensor will have the
705
+ same shape as the corresponding input tensor.
706
+ """
707
+ E = q.size(-1)
708
+ if k is v:
709
+ if q is k:
710
+ # self-attention
711
+ proj = linear(q, w, b)
712
+ # reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk()
713
+ proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
714
+ return proj[0], proj[1], proj[2]
715
+ else:
716
+ # encoder-decoder attention
717
+ w_q, w_kv = w.split([E, E * 2])
718
+ if b is None:
719
+ b_q = b_kv = None
720
+ else:
721
+ b_q, b_kv = b.split([E, E * 2])
722
+ q_proj = linear(q, w_q, b_q)
723
+ kv_proj = linear(k, w_kv, b_kv)
724
+ # reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk()
725
+ kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
726
+ return (q_proj, kv_proj[0], kv_proj[1])
727
+ else:
728
+ w_q, w_k, w_v = w.chunk(3)
729
+ if b is None:
730
+ b_q = b_k = b_v = None
731
+ else:
732
+ b_q, b_k, b_v = b.chunk(3)
733
+ return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
734
+
735
+
736
+ def _in_projection(
737
+ q: Tensor,
738
+ k: Tensor,
739
+ v: Tensor,
740
+ w_q: Tensor,
741
+ w_k: Tensor,
742
+ w_v: Tensor,
743
+ b_q: Optional[Tensor] = None,
744
+ b_k: Optional[Tensor] = None,
745
+ b_v: Optional[Tensor] = None,
746
+ ) -> Tuple[Tensor, Tensor, Tensor]:
747
+ r"""
748
+ Performs the in-projection step of the attention operation. This is simply
749
+ a triple of linear projections, with shape constraints on the weights which
750
+ ensure embedding dimension uniformity in the projected outputs.
751
+ Output is a triple containing projection tensors for query, key and value.
752
+ Args:
753
+ q, k, v: query, key and value tensors to be projected.
754
+ w_q, w_k, w_v: weights for q, k and v, respectively.
755
+ b_q, b_k, b_v: optional biases for q, k and v, respectively.
756
+ Shape:
757
+ Inputs:
758
+ - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
759
+ number of leading dimensions.
760
+ - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
761
+ number of leading dimensions.
762
+ - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
763
+ number of leading dimensions.
764
+ - w_q: :math:`(Eq, Eq)`
765
+ - w_k: :math:`(Eq, Ek)`
766
+ - w_v: :math:`(Eq, Ev)`
767
+ - b_q: :math:`(Eq)`
768
+ - b_k: :math:`(Eq)`
769
+ - b_v: :math:`(Eq)`
770
+ Output: in output triple :math:`(q', k', v')`,
771
+ - q': :math:`[Qdims..., Eq]`
772
+ - k': :math:`[Kdims..., Eq]`
773
+ - v': :math:`[Vdims..., Eq]`
774
+ """
775
+ Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
776
+ assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
777
+ assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
778
+ assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
779
+ assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
780
+ assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
781
+ assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
782
+ return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
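A brief, hedged usage sketch of the separate-projection helper above (standalone and purely illustrative; the dimensions are made up, and the three `linear` calls mirror what `_in_projection` returns when no biases are given):

import torch
from torch.nn.functional import linear

# Hypothetical cross-attention sizes: queries with Eq=8, keys/values with Ek=Ev=6.
Eq, Ek, Ev = 8, 6, 6
q, k, v = torch.randn(2, 4, Eq), torch.randn(2, 7, Ek), torch.randn(2, 7, Ev)
w_q, w_k, w_v = torch.randn(Eq, Eq), torch.randn(Eq, Ek), torch.randn(Eq, Ev)

q2, k2, v2 = linear(q, w_q), linear(k, w_k), linear(v, w_v)
# All three projections land in the query embedding dimension Eq, as the docstring requires.
assert q2.shape == (2, 4, Eq) and k2.shape == (2, 7, Eq) and v2.shape == (2, 7, Eq)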
checkpoint-4800/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c49abc3bdedbec1fc8e1028ef422150f19ee7470d7b542e1ad8869fc044d2af
3
+ size 15984
checkpoint-4800/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df12ca4106ff0831785a55b5da88f6c86f6f67bd3d09b2dced4f20b539b14f72
3
+ size 15984
checkpoint-4800/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fc0786faff729a3a1582f98b806b68d4f0b76aebb25cbad4431b73176b11c1
3
+ size 15984
checkpoint-4800/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3117e1218a2dd3f7f8c516a840af48f6b93660d852cca124269f78c21f8577c
3
+ size 15984
checkpoint-4800/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdaa30c82476bf6a65e4eb9ca2ae7b95f1b38f41a6f5b2f1cbdda9af86a4a7a0
3
+ size 15984
checkpoint-4800/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09cdde6931807139efa184e8a98108b74bb05730bc511336966b254b68dc93ee
3
+ size 15984
checkpoint-4800/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a2268136932c55b3857d38c7cf3fc4bd3cdad532c156b9addebc6d26374374a
3
+ size 15984
checkpoint-4800/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56cd9a502015b79e0ab94c92a04bd96c99aaf79ef8d64bf81d81eb702c10c2a8
3
+ size 15984
checkpoint-4800/special_tokens_map.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<image>",
4
+ "</image>",
5
+ "<ref>",
6
+ "</ref>",
7
+ "<box>",
8
+ "</box>",
9
+ "<quad>",
10
+ "</quad>",
11
+ "<point>",
12
+ "</point>",
13
+ "<slice>",
14
+ "</slice>",
15
+ "<image_id>",
16
+ "</image_id>",
17
+ "<|reserved_special_token_0|>",
18
+ "<|reserved_special_token_1|>",
19
+ "<|reserved_special_token_2|>",
20
+ "<|reserved_special_token_3|>",
21
+ "<|reserved_special_token_4|>",
22
+ "<|reserved_special_token_5|>"
23
+ ],
24
+ "bos_token": {
25
+ "content": "<|im_start|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ "eos_token": {
32
+ "content": "<|im_end|>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ },
38
+ "pad_token": {
39
+ "content": "<|endoftext|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false
44
+ },
45
+ "unk_token": {
46
+ "content": "<unk>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false
51
+ }
52
+ }
checkpoint-4800/tokenization_minicpmv_fast.py ADDED
@@ -0,0 +1,66 @@
1
+ from transformers.models.qwen2 import Qwen2TokenizerFast
2
+
3
+
4
+ class MiniCPMVTokenizerFast(Qwen2TokenizerFast):
5
+ def __init__(self, **kwargs):
6
+ super().__init__(**kwargs)
7
+ self.im_start = "<image>"
8
+ self.im_end = "</image>"
9
+ self.ref_start = "<ref>"
10
+ self.ref_end = "</ref>"
11
+ self.box_start = "<box>"
12
+ self.box_end = "</box>"
13
+ self.quad_start = "<quad>"
14
+ self.quad_end = "</quad>"
15
+ self.slice_start = "<slice>"
16
+ self.slice_end = "</slice>"
17
+ self.im_id_start = "<image_id>"
18
+ self.im_id_end = "</image_id>"
19
+
20
+ @property
21
+ def eos_id(self):
22
+ return self.eos_token_id
23
+
24
+ @property
25
+ def bos_id(self):
26
+ return self.bos_token_id
27
+
28
+ @property
29
+ def unk_id(self):
30
+ return self.unk_token_id
31
+
32
+ @property
33
+ def im_start_id(self):
34
+ return self.convert_tokens_to_ids(self.im_start)
35
+
36
+ @property
37
+ def im_end_id(self):
38
+ return self.convert_tokens_to_ids(self.im_end)
39
+
40
+ @property
41
+ def slice_start_id(self):
42
+ return self.convert_tokens_to_ids(self.slice_start)
43
+
44
+ @property
45
+ def slice_end_id(self):
46
+ return self.convert_tokens_to_ids(self.slice_end)
47
+
48
+ @property
49
+ def im_id_start_id(self):
50
+ return self.convert_tokens_to_ids(self.im_id_start)
51
+
52
+ @property
53
+ def im_id_end_id(self):
54
+ return self.convert_tokens_to_ids(self.im_id_end)
55
+
56
+ @property
57
+ def newline_id(self):
58
+ return self.convert_tokens_to_ids('\n')
59
+
60
+ @staticmethod
61
+ def escape(text: str) -> str:
62
+ return text
63
+
64
+ @staticmethod
65
+ def unescape(text: str) -> str:
66
+ return text
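A hedged usage sketch for the tokenizer class above (the local path is hypothetical; `trust_remote_code=True` lets `AutoTokenizer` pick up `MiniCPMVTokenizerFast` through the `auto_map` entry in tokenizer_config.json):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-4800", trust_remote_code=True)  # hypothetical local path
print(tok.im_start_id, tok.im_end_id)        # ids of <image> / </image>
print(tok.slice_start_id, tok.slice_end_id)  # ids of <slice> / </slice>; 151656 / 151657 per added_tokens.json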
checkpoint-4800/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4800/tokenizer_config.json ADDED
@@ -0,0 +1,235 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "128244": {
5
+ "content": "<unk>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151643": {
13
+ "content": "<|endoftext|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151644": {
21
+ "content": "<|im_start|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151645": {
29
+ "content": "<|im_end|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151646": {
37
+ "content": "<image>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "151647": {
45
+ "content": "</image>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "151648": {
53
+ "content": "<ref>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "151649": {
61
+ "content": "</ref>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "151650": {
69
+ "content": "<box>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "151651": {
77
+ "content": "</box>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "151652": {
85
+ "content": "<quad>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "151653": {
93
+ "content": "</quad>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "151654": {
101
+ "content": "<point>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "151655": {
109
+ "content": "</point>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": true
115
+ },
116
+ "151656": {
117
+ "content": "<slice>",
118
+ "lstrip": false,
119
+ "normalized": false,
120
+ "rstrip": false,
121
+ "single_word": false,
122
+ "special": true
123
+ },
124
+ "151657": {
125
+ "content": "</slice>",
126
+ "lstrip": false,
127
+ "normalized": false,
128
+ "rstrip": false,
129
+ "single_word": false,
130
+ "special": true
131
+ },
132
+ "151658": {
133
+ "content": "<image_id>",
134
+ "lstrip": false,
135
+ "normalized": false,
136
+ "rstrip": false,
137
+ "single_word": false,
138
+ "special": true
139
+ },
140
+ "151659": {
141
+ "content": "</image_id>",
142
+ "lstrip": false,
143
+ "normalized": false,
144
+ "rstrip": false,
145
+ "single_word": false,
146
+ "special": true
147
+ },
148
+ "151660": {
149
+ "content": "<|reserved_special_token_0|>",
150
+ "lstrip": false,
151
+ "normalized": false,
152
+ "rstrip": false,
153
+ "single_word": false,
154
+ "special": true
155
+ },
156
+ "151661": {
157
+ "content": "<|reserved_special_token_1|>",
158
+ "lstrip": false,
159
+ "normalized": false,
160
+ "rstrip": false,
161
+ "single_word": false,
162
+ "special": true
163
+ },
164
+ "151662": {
165
+ "content": "<|reserved_special_token_2|>",
166
+ "lstrip": false,
167
+ "normalized": false,
168
+ "rstrip": false,
169
+ "single_word": false,
170
+ "special": true
171
+ },
172
+ "151663": {
173
+ "content": "<|reserved_special_token_3|>",
174
+ "lstrip": false,
175
+ "normalized": false,
176
+ "rstrip": false,
177
+ "single_word": false,
178
+ "special": true
179
+ },
180
+ "151664": {
181
+ "content": "<|reserved_special_token_4|>",
182
+ "lstrip": false,
183
+ "normalized": false,
184
+ "rstrip": false,
185
+ "single_word": false,
186
+ "special": true
187
+ },
188
+ "151665": {
189
+ "content": "<|reserved_special_token_5|>",
190
+ "lstrip": false,
191
+ "normalized": false,
192
+ "rstrip": false,
193
+ "single_word": false,
194
+ "special": true
195
+ }
196
+ },
197
+ "additional_special_tokens": [
198
+ "<image>",
199
+ "</image>",
200
+ "<ref>",
201
+ "</ref>",
202
+ "<box>",
203
+ "</box>",
204
+ "<quad>",
205
+ "</quad>",
206
+ "<point>",
207
+ "</point>",
208
+ "<slice>",
209
+ "</slice>",
210
+ "<image_id>",
211
+ "</image_id>",
212
+ "<|reserved_special_token_0|>",
213
+ "<|reserved_special_token_1|>",
214
+ "<|reserved_special_token_2|>",
215
+ "<|reserved_special_token_3|>",
216
+ "<|reserved_special_token_4|>",
217
+ "<|reserved_special_token_5|>"
218
+ ],
219
+ "auto_map": {
220
+ "AutoTokenizer": [
221
+ "tokenization_qwen2.Qwen2Tokenizer",
222
+ "tokenization_minicpmv_fast.MiniCPMVTokenizerFast"
223
+ ]
224
+ },
225
+ "bos_token": "<|im_start|>",
226
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
227
+ "clean_up_tokenization_spaces": false,
228
+ "eos_token": "<|im_end|>",
229
+ "errors": "replace",
230
+ "model_max_length": 1000000000000000019884624838656,
231
+ "pad_token": "<|endoftext|>",
232
+ "split_special_tokens": false,
233
+ "tokenizer_class": "MiniCPMVTokenizer",
234
+ "unk_token": "<unk>"
235
+ }
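The `chat_template` above prepends a default system turn and wraps every message in `<|im_start|>` / `<|im_end|>`. A minimal, hedged rendering sketch (hypothetical local path):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-4800", trust_remote_code=True)  # hypothetical local path
messages = [{"role": "user", "content": "Describe the image."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected to start with "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
# and to end with "<|im_start|>assistant\n" because add_generation_prompt=True.
print(prompt)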
checkpoint-4800/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4800/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef488c56f09afccd90df2cbfc351856b9933eb61d96e4c0f699df901bfd8debc
3
+ size 7672
checkpoint-4800/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4800/zero_to_fp32.py ADDED
@@ -0,0 +1,604 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example: python zero_to_fp32.py . pytorch_model.bin
14
+
15
+ import argparse
16
+ import torch
17
+ import glob
18
+ import math
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass
23
+
24
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
25
+ # DeepSpeed data structures it has to be available in the current python environment.
26
+ from deepspeed.utils import logger
27
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
+
31
+
32
+ @dataclass
33
+ class zero_model_state:
34
+ buffers: dict()
35
+ param_shapes: dict()
36
+ shared_params: list
37
+ ds_version: int
38
+ frozen_param_shapes: dict()
39
+ frozen_param_fragments: dict()
40
+
41
+
42
+ debug = 0
43
+
44
+ # load to cpu
45
+ device = torch.device('cpu')
46
+
47
+
48
+ def atoi(text):
49
+ return int(text) if text.isdigit() else text
50
+
51
+
52
+ def natural_keys(text):
53
+ '''
54
+ alist.sort(key=natural_keys) sorts in human order
55
+ http://nedbatchelder.com/blog/200712/human_sorting.html
56
+ (See Toothy's implementation in the comments)
57
+ '''
58
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
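# Illustrative note (not part of the upstream script): natural_keys gives human
# ordering, e.g. sorted(["rank_10.pt", "rank_2.pt"], key=natural_keys) returns
# ["rank_2.pt", "rank_10.pt"] instead of the lexicographic order; the file names
# here are hypothetical, the real ones match the *_optim_states.pt pattern below.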
59
+
60
+
61
+ def get_model_state_file(checkpoint_dir, zero_stage):
62
+ if not os.path.isdir(checkpoint_dir):
63
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
64
+
65
+ # there should be only one file
66
+ if zero_stage <= 2:
67
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
68
+ elif zero_stage == 3:
69
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
70
+
71
+ if not os.path.exists(file):
72
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
73
+
74
+ return file
75
+
76
+
77
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
78
+ # XXX: need to test that this simple glob rule works for multi-node setup too
79
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
80
+
81
+ if len(ckpt_files) == 0:
82
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
83
+
84
+ return ckpt_files
85
+
86
+
87
+ def get_optim_files(checkpoint_dir):
88
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
89
+
90
+
91
+ def get_model_state_files(checkpoint_dir):
92
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
93
+
94
+
95
+ def parse_model_states(files):
96
+ zero_model_states = []
97
+ for file in files:
98
+ state_dict = torch.load(file, map_location=device)
99
+
100
+ if BUFFER_NAMES not in state_dict:
101
+ raise ValueError(f"{file} is not a model state checkpoint")
102
+ buffer_names = state_dict[BUFFER_NAMES]
103
+ if debug:
104
+ print("Found buffers:", buffer_names)
105
+
106
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
107
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
108
+ param_shapes = state_dict[PARAM_SHAPES]
109
+
110
+ # collect parameters that are included in param_shapes
111
+ param_names = []
112
+ for s in param_shapes:
113
+ for name in s.keys():
114
+ param_names.append(name)
115
+
116
+ # update with frozen parameters
117
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
118
+ if frozen_param_shapes is not None:
119
+ if debug:
120
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
121
+ param_names += list(frozen_param_shapes.keys())
122
+
123
+ # handle shared params
124
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
125
+
126
+ ds_version = state_dict.get(DS_VERSION, None)
127
+
128
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
129
+
130
+ z_model_state = zero_model_state(buffers=buffers,
131
+ param_shapes=param_shapes,
132
+ shared_params=shared_params,
133
+ ds_version=ds_version,
134
+ frozen_param_shapes=frozen_param_shapes,
135
+ frozen_param_fragments=frozen_param_fragments)
136
+ zero_model_states.append(z_model_state)
137
+
138
+ return zero_model_states
139
+
140
+
141
+ def parse_optim_states(files, ds_checkpoint_dir):
142
+
143
+ total_files = len(files)
144
+ state_dicts = []
145
+ for f in files:
146
+ state_dict = torch.load(f, map_location=device)
147
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
148
+ # and also handle the case where it was already removed by another helper script
149
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
150
+ state_dicts.append(state_dict)
151
+
152
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
153
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
154
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
155
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
156
+
157
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
158
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
159
+ # use the max of the partition_count to get the dp world_size.
160
+
161
+ if type(world_size) is list:
162
+ world_size = max(world_size)
163
+
164
+ if world_size != total_files:
165
+ raise ValueError(
166
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
167
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
168
+ )
169
+
170
+ # the groups are named differently in each stage
171
+ if zero_stage <= 2:
172
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
173
+ elif zero_stage == 3:
174
+ fp32_groups_key = FP32_FLAT_GROUPS
175
+ else:
176
+ raise ValueError(f"unknown zero stage {zero_stage}")
177
+
178
+ if zero_stage <= 2:
179
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
180
+ elif zero_stage == 3:
181
+ # if there is more than one param group, there will be multiple flattened tensors - one
182
+ # flattened tensor per group - for simplicity merge them into a single tensor
183
+ #
184
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
185
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
186
+
187
+ fp32_flat_groups = [
188
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
189
+ ]
190
+
191
+ return zero_stage, world_size, fp32_flat_groups
192
+
193
+
194
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
195
+ """
196
+ Returns fp32 state_dict reconstructed from ds checkpoint
197
+
198
+ Args:
199
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
200
+
201
+ """
202
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
203
+
204
+ optim_files = get_optim_files(ds_checkpoint_dir)
205
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
206
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
207
+
208
+ model_files = get_model_state_files(ds_checkpoint_dir)
209
+
210
+ zero_model_states = parse_model_states(model_files)
211
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
212
+
213
+ if zero_stage <= 2:
214
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
215
+ exclude_frozen_parameters)
216
+ elif zero_stage == 3:
217
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
218
+ exclude_frozen_parameters)
219
+
220
+
221
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
222
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
223
+ return
224
+
225
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
226
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
227
+
228
+ if debug:
229
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
230
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
231
+
232
+ wanted_params = len(frozen_param_shapes)
233
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
234
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
235
+ print(f'Frozen params: Have {avail_numel} numels to process.')
236
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
237
+
238
+ total_params = 0
239
+ total_numel = 0
240
+ for name, shape in frozen_param_shapes.items():
241
+ total_params += 1
242
+ unpartitioned_numel = shape.numel()
243
+ total_numel += unpartitioned_numel
244
+
245
+ state_dict[name] = frozen_param_fragments[name]
246
+
247
+ if debug:
248
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
249
+
250
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
251
+
252
+
253
+ def _has_callable(obj, fn):
254
+ attr = getattr(obj, fn, None)
255
+ return callable(attr)
256
+
257
+
258
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
259
+ param_shapes = zero_model_states[0].param_shapes
260
+
261
+ # Reconstruction protocol:
262
+ #
263
+ # XXX: document this
264
+
265
+ if debug:
266
+ for i in range(world_size):
267
+ for j in range(len(fp32_flat_groups[0])):
268
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
269
+
270
+ # XXX: memory usage doubles here (zero2)
271
+ num_param_groups = len(fp32_flat_groups[0])
272
+ merged_single_partition_of_fp32_groups = []
273
+ for i in range(num_param_groups):
274
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
275
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
276
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
277
+ avail_numel = sum(
278
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
279
+
280
+ if debug:
281
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
282
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
283
+ # not asserting if there is a mismatch due to possible padding
284
+ print(f"Have {avail_numel} numels to process.")
285
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
286
+
287
+ # params
288
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
289
+ # out-of-core computing solution
290
+ total_numel = 0
291
+ total_params = 0
292
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
293
+ offset = 0
294
+ avail_numel = full_single_fp32_vector.numel()
295
+ for name, shape in shapes.items():
296
+
297
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
298
+ total_numel += unpartitioned_numel
299
+ total_params += 1
300
+
301
+ if debug:
302
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
303
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
304
+ offset += unpartitioned_numel
305
+
306
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
307
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
308
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
309
+ # live optimizer object, so we are checking that the numbers are within the right range
310
+ align_to = 2 * world_size
311
+
312
+ def zero2_align(x):
313
+ return align_to * math.ceil(x / align_to)
314
+
315
+ if debug:
316
+ print(f"original offset={offset}, avail_numel={avail_numel}")
317
+
318
+ offset = zero2_align(offset)
319
+ avail_numel = zero2_align(avail_numel)
320
+
321
+ if debug:
322
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
323
+
324
+ # Sanity check
325
+ if offset != avail_numel:
326
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
327
+
328
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
329
+
330
+
331
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
332
+ exclude_frozen_parameters):
333
+ state_dict = OrderedDict()
334
+
335
+ # buffers
336
+ buffers = zero_model_states[0].buffers
337
+ state_dict.update(buffers)
338
+ if debug:
339
+ print(f"added {len(buffers)} buffers")
340
+
341
+ if not exclude_frozen_parameters:
342
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
343
+
344
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
345
+
346
+ # recover shared parameters
347
+ for pair in zero_model_states[0].shared_params:
348
+ if pair[1] in state_dict:
349
+ state_dict[pair[0]] = state_dict[pair[1]]
350
+
351
+ return state_dict
352
+
353
+
354
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
355
+ remainder = unpartitioned_numel % world_size
356
+ padding_numel = (world_size - remainder) if remainder else 0
357
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
358
+ return partitioned_numel, padding_numel
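# Worked example (illustrative, not part of the upstream script): with world_size=8
# and a tensor of 100 elements, partitioned_numel = ceil(100 / 8) = 13 and
# padding_numel = 8 - (100 % 8) = 4, so 8 * 13 = 104 slots hold 100 real values plus 4 of padding.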
359
+
360
+
361
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
362
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
363
+ return
364
+
365
+ if debug:
366
+ for i in range(world_size):
367
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
368
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
369
+
370
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
371
+ wanted_params = len(frozen_param_shapes)
372
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
373
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
374
+ print(f'Frozen params: Have {avail_numel} numels to process.')
375
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
376
+
377
+ total_params = 0
378
+ total_numel = 0
379
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
380
+ total_params += 1
381
+ unpartitioned_numel = shape.numel()
382
+ total_numel += unpartitioned_numel
383
+
384
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
385
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
386
+
387
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
388
+
389
+ if debug:
390
+ print(
391
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
392
+ )
393
+
394
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
395
+
396
+
397
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
398
+ param_shapes = zero_model_states[0].param_shapes
399
+ avail_numel = fp32_flat_groups[0].numel() * world_size
400
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
401
+ # param, re-consolidating each param, while dealing with padding if any
402
+
403
+ # merge list of dicts, preserving order
404
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
405
+
406
+ if debug:
407
+ for i in range(world_size):
408
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
409
+
410
+ wanted_params = len(param_shapes)
411
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
412
+ # not asserting if there is a mismatch due to possible padding
413
+ avail_numel = fp32_flat_groups[0].numel() * world_size
414
+ print(f"Trainable params: Have {avail_numel} numels to process.")
415
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
416
+
417
+ # params
418
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
419
+ # out-of-core computing solution
420
+ offset = 0
421
+ total_numel = 0
422
+ total_params = 0
423
+ for name, shape in param_shapes.items():
424
+
425
+ unpartitioned_numel = shape.numel()
426
+ total_numel += unpartitioned_numel
427
+ total_params += 1
428
+
429
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
430
+
431
+ if debug:
432
+ print(
433
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
434
+ )
435
+
436
+ # XXX: memory usage doubles here
437
+ state_dict[name] = torch.cat(
438
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
439
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
440
+ offset += partitioned_numel
441
+
442
+ offset *= world_size
443
+
444
+ # Sanity check
445
+ if offset != avail_numel:
446
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
447
+
448
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
449
+
450
+
451
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
452
+ exclude_frozen_parameters):
453
+ state_dict = OrderedDict()
454
+
455
+ # buffers
456
+ buffers = zero_model_states[0].buffers
457
+ state_dict.update(buffers)
458
+ if debug:
459
+ print(f"added {len(buffers)} buffers")
460
+
461
+ if not exclude_frozen_parameters:
462
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
463
+
464
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
465
+
466
+ # recover shared parameters
467
+ for pair in zero_model_states[0].shared_params:
468
+ if pair[1] in state_dict:
469
+ state_dict[pair[0]] = state_dict[pair[1]]
470
+
471
+ return state_dict
472
+
473
+
474
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
475
+ """
476
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
477
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
478
+ via a model hub.
479
+
480
+ Args:
481
+ - ``checkpoint_dir``: path to the desired checkpoint folder
482
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
483
+ - ``exclude_frozen_parameters``: exclude frozen parameters
484
+
485
+ Returns:
486
+ - pytorch ``state_dict``
487
+
488
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
489
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
490
+ the checkpoint.
491
+
492
+ A typical usage might be ::
493
+
494
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
495
+ # do the training and checkpoint saving
496
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
497
+ model = model.cpu() # move to cpu
498
+ model.load_state_dict(state_dict)
499
+ # submit to model hub or save the model to share with others
500
+
501
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
502
+ application. i.e. you will need to re-initialize the deepspeed engine, since
503
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
504
+
505
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
506
+
507
+ """
508
+ if tag is None:
509
+ latest_path = os.path.join(checkpoint_dir, 'latest')
510
+ if os.path.isfile(latest_path):
511
+ with open(latest_path, 'r') as fd:
512
+ tag = fd.read().strip()
513
+ else:
514
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
515
+
516
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
517
+
518
+ if not os.path.isdir(ds_checkpoint_dir):
519
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
520
+
521
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
522
+
523
+
524
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
525
+ """
526
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
527
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
528
+
529
+ Args:
530
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
531
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
532
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
533
+ - ``exclude_frozen_parameters``: exclude frozen parameters
534
+ """
535
+
536
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
537
+ print(f"Saving fp32 state dict to {output_file}")
538
+ torch.save(state_dict, output_file)
539
+
540
+
541
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
542
+ """
543
+ 1. Put the provided model to cpu
544
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
545
+ 3. Load it into the provided model
546
+
547
+ Args:
548
+ - ``model``: the model object to update
549
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
550
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
551
+
552
+ Returns:
553
+ - ``model``: modified model
554
+
555
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
556
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
557
+ conveniently placed for you in the checkpoint folder.
558
+
559
+ A typical usage might be ::
560
+
561
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
562
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
563
+ # submit to model hub or save the model to share with others
564
+
565
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
566
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
567
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
568
+
569
+ """
570
+ logger.info(f"Extracting fp32 weights")
571
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
572
+
573
+ logger.info(f"Overwriting model with fp32 weights")
574
+ model = model.cpu()
575
+ model.load_state_dict(state_dict, strict=False)
576
+
577
+ return model
578
+
579
+
580
+ if __name__ == "__main__":
581
+
582
+ parser = argparse.ArgumentParser()
583
+ parser.add_argument("checkpoint_dir",
584
+ type=str,
585
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
586
+ parser.add_argument(
587
+ "output_file",
588
+ type=str,
589
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
590
+ parser.add_argument("-t",
591
+ "--tag",
592
+ type=str,
593
+ default=None,
594
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
595
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
596
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
597
+ args = parser.parse_args()
598
+
599
+ debug = args.debug
600
+
601
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
602
+ args.output_file,
603
+ tag=args.tag,
604
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
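The same conversion can also be driven from Python for the checkpoints in this commit; a hedged sketch with hypothetical paths (the CLI form noted in the script's header, `python zero_to_fp32.py . pytorch_model.bin`, is equivalent):

from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict  # assumes the checkpoint dir is on sys.path

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-4800",                    # folder whose 'latest' file names the global_step tag
    "checkpoint-4800/pytorch_model.bin",  # consolidated fp32 output
)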
checkpoint-5000/added_tokens.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "</box>": 151651,
3
+ "</image>": 151647,
4
+ "</image_id>": 151659,
5
+ "</point>": 151655,
6
+ "</quad>": 151653,
7
+ "</ref>": 151649,
8
+ "</slice>": 151657,
9
+ "<box>": 151650,
10
+ "<image>": 151646,
11
+ "<image_id>": 151658,
12
+ "<point>": 151654,
13
+ "<quad>": 151652,
14
+ "<ref>": 151648,
15
+ "<slice>": 151656,
16
+ "<|endoftext|>": 151643,
17
+ "<|im_end|>": 151645,
18
+ "<|im_start|>": 151644,
19
+ "<|reserved_special_token_0|>": 151660,
20
+ "<|reserved_special_token_1|>": 151661,
21
+ "<|reserved_special_token_2|>": 151662,
22
+ "<|reserved_special_token_3|>": 151663,
23
+ "<|reserved_special_token_4|>": 151664,
24
+ "<|reserved_special_token_5|>": 151665
25
+ }
checkpoint-5000/config.json ADDED
@@ -0,0 +1,55 @@
1
+ {
2
+ "_name_or_path": "/ephemeral/models/new-dsl-qwen-describe-grid",
3
+ "architectures": [
4
+ "MiniCPMV"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_minicpm.MiniCPMVConfig",
9
+ "AutoModel": "modeling_minicpmv.MiniCPMV",
10
+ "AutoModelForCausalLM": "openbmb/MiniCPM-V-2_6--modeling_minicpmv.MiniCPMV"
11
+ },
12
+ "batch_vision_input": true,
13
+ "bos_token_id": 151643,
14
+ "drop_vision_last_layer": false,
15
+ "eos_token_id": 151645,
16
+ "hidden_act": "silu",
17
+ "hidden_size": 3584,
18
+ "image_size": 448,
19
+ "initializer_range": 0.02,
20
+ "intermediate_size": 18944,
21
+ "max_position_embeddings": 32768,
22
+ "max_window_layers": 28,
23
+ "model_type": "minicpmv",
24
+ "num_attention_heads": 28,
25
+ "num_hidden_layers": 28,
26
+ "num_key_value_heads": 4,
27
+ "patch_size": 14,
28
+ "query_num": 64,
29
+ "rms_norm_eps": 1e-06,
30
+ "rope_theta": 1000000.0,
31
+ "slice_config": {
32
+ "max_slice_nums": 9,
33
+ "model_type": "minicpmv"
34
+ },
35
+ "slice_mode": true,
36
+ "sliding_window": 131072,
37
+ "tie_word_embeddings": false,
38
+ "torch_dtype": "bfloat16",
39
+ "transformers_version": "4.40.0",
40
+ "use_cache": true,
41
+ "use_image_id": true,
42
+ "use_sliding_window": false,
43
+ "version": 2.6,
44
+ "vision_batch_size": 16,
45
+ "vision_config": {
46
+ "hidden_size": 1152,
47
+ "image_size": 980,
48
+ "intermediate_size": 4304,
49
+ "model_type": "siglip_vision_model",
50
+ "num_attention_heads": 16,
51
+ "num_hidden_layers": 27,
52
+ "patch_size": 14
53
+ },
54
+ "vocab_size": 151666
55
+ }
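A hedged loading sketch for this checkpoint (the local path is hypothetical; `trust_remote_code=True` resolves the custom `MiniCPMVConfig` / `MiniCPMV` classes named in `auto_map`, and `torch_dtype` matches the bfloat16 weights declared above):

import torch
from transformers import AutoModel, AutoTokenizer

path = "checkpoint-5000"  # hypothetical local path
model = AutoModel.from_pretrained(path, trust_remote_code=True, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)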
checkpoint-5000/configuration_minicpm.py ADDED
@@ -0,0 +1,100 @@
1
+ # coding=utf-8
2
+ """ MiniCPMV model configuration"""
3
+
4
+ import os
5
+ from typing import Union
6
+
7
+ from transformers.utils import logging
8
+ from transformers import Qwen2Config, PretrainedConfig
9
+ from .modeling_navit_siglip import SiglipVisionConfig
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
+ class MiniCPMVSliceConfig(PretrainedConfig):
15
+ model_type = "minicpmv"
16
+
17
+ def __init__(
18
+ self,
19
+ patch_size=14,
20
+ max_slice_nums=9,
21
+ scale_resolution=448,
22
+ **kwargs,
23
+ ):
24
+ super().__init__(**kwargs)
25
+ self.patch_size = patch_size
26
+ self.max_slice_nums = max_slice_nums
27
+ self.scale_resolution = scale_resolution
28
+
29
+ @classmethod
30
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
31
+ cls._set_token_in_kwargs(kwargs)
32
+
33
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
34
+
35
+ if config_dict.get("model_type") == "minicpmv":
36
+ config_dict = config_dict["slice_config"]
37
+
38
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
39
+ logger.warning(
40
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
41
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
42
+ )
43
+
44
+ return cls.from_dict(config_dict, **kwargs)
45
+
46
+
47
+
48
+ class MiniCPMVConfig(Qwen2Config):
49
+ model_type = "minicpmv"
50
+ keys_to_ignore_at_inference = ["past_key_values"]
51
+
52
+ default_vision_config = {
53
+ "hidden_size": 1152,
54
+ "image_size": 980,
55
+ "intermediate_size": 4304,
56
+ "model_type": "siglip",
57
+ "num_attention_heads": 16,
58
+ "num_hidden_layers": 27,
59
+ "patch_size": 14,
60
+ }
61
+
62
+ def __init__(
63
+ self,
64
+ use_cache=True,
65
+ query_num=64,
66
+ image_size=448,
67
+ drop_vision_last_layer=True,
68
+ batch_vision_input=True,
69
+ slice_config=None,
70
+ vision_config=None,
71
+ use_image_id=True,
72
+ vision_batch_size=16,
73
+ **kwargs,
74
+ ):
75
+ self.use_cache = use_cache
76
+ self.query_num = query_num
77
+ self.image_size = image_size
78
+ self.drop_vision_last_layer = drop_vision_last_layer
79
+ self.batch_vision_input = batch_vision_input
80
+ self.use_image_id = use_image_id
81
+ self.vision_batch_size = vision_batch_size
82
+
83
+ if slice_config is None:
84
+ self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
85
+ else:
86
+ self.slice_config = MiniCPMVSliceConfig(**slice_config)
87
+ self.slice_mode = True
88
+
89
+ # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit add tgt_sizes
90
+ if vision_config is None:
91
+ self.vision_config = SiglipVisionConfig(**self.default_vision_config)
92
+ logger.info("vision_config is None, using default vision config")
93
+ elif isinstance(vision_config, dict):
94
+ self.vision_config = SiglipVisionConfig(**vision_config)
95
+ elif isinstance(vision_config, SiglipVisionConfig):
96
+ self.vision_config = vision_config
97
+
98
+ self.patch_size = self.vision_config.patch_size
99
+
100
+ super().__init__(**kwargs)
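A hedged sketch of this config in use (hypothetical local path; when `vision_config` or `slice_config` is omitted from the constructor, the class falls back to the SigLIP defaults and `max_slice_nums=1` shown above):

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("checkpoint-5000", trust_remote_code=True)  # hypothetical local path
print(cfg.query_num, cfg.image_size)   # 64, 448 per config.json
print(cfg.vision_config.hidden_size)   # 1152 for the SigLIP tower
print(cfg.patch_size)                  # 14, copied from vision_config.patch_size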
checkpoint-5000/generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 151643,
4
+ "eos_token_id": 151645,
5
+ "transformers_version": "4.40.0"
6
+ }
checkpoint-5000/latest ADDED
@@ -0,0 +1 @@
1
+ global_step5000
checkpoint-5000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-5000/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:327b5e63ac7729d00b0b17233a0d648a623fc080cc1069dce9cb79478cdaf2b5
3
+ size 4874808328
checkpoint-5000/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6d7af1f12ba8c72fec16ba3f3951df5643bef24de8b0af5baec67290c07ca4f
3
+ size 4932751496
checkpoint-5000/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9ba1332c74a92b9c8af5fbc2c2da1baa8cd5ed4fd8056b98a586ff4bc3e54cf
3
+ size 4330865648
checkpoint-5000/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa9e3612dcd7a7bc0b25cc9a54689aabb09ef3094a24abd8ad366aae9897b428
3
+ size 2060017080
checkpoint-5000/model.safetensors.index.json ADDED
@@ -0,0 +1,796 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16198350304
4
+ },
5
+ "weight_map": {
6
+ "llm.lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "llm.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
+ "llm.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
+ "llm.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
+ "llm.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
+ "llm.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
+ "llm.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
+ "llm.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
14
+ "llm.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "llm.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "llm.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
17
+ "llm.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
18
+ "llm.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
19
+ "llm.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
20
+ "llm.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
21
+ "llm.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
22
+ "llm.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
23
+ "llm.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
24
+ "llm.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
25
+ "llm.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
26
+ "llm.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
27
+ "llm.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
28
+ "llm.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
29
+ "llm.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
30
+ "llm.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
31
+ "llm.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
32
+ "llm.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
33
+ "llm.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
34
+ "llm.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
35
+ "llm.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
36
+ "llm.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
37
+ "llm.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
38
+ "llm.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
39
+ "llm.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
40
+ "llm.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
41
+ "llm.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
42
+ "llm.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
43
+ "llm.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
44
+ "llm.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
45
+ "llm.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
46
+ "llm.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
47
+ "llm.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
48
+ "llm.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
49
+ "llm.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
50
+ "llm.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
51
+ "llm.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
52
+ "llm.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
53
+ "llm.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
54
+ "llm.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
55
+ "llm.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
56
+ "llm.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
57
+ "llm.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
58
+ "llm.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
59
+ "llm.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
60
+ "llm.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
61
+ "llm.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
62
+ "llm.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
63
+ "llm.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
64
+ "llm.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
65
+ "llm.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
66
+ "llm.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
67
+ "llm.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
68
+ "llm.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
69
+ "llm.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
70
+ "llm.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
71
+ "llm.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
72
+ "llm.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "llm.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
74
+ "llm.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
75
+ "llm.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
76
+ "llm.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
77
+ "llm.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
78
+ "llm.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
79
+ "llm.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
80
+ "llm.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
81
+ "llm.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
82
+ "llm.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
83
+ "llm.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
84
+ "llm.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
85
+ "llm.model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
86
+ "llm.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
87
+ "llm.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "llm.model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
89
+ "llm.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
90
+ "llm.model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
91
+ "llm.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
92
+ "llm.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
93
+ "llm.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
94
+ "llm.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
95
+ "llm.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
96
+ "llm.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
97
+ "llm.model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
98
+ "llm.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
99
+ "llm.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
100
+ "llm.model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
101
+ "llm.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
102
+ "llm.model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
103
+ "llm.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
104
+ "llm.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
105
+ "llm.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
106
+ "llm.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
107
+ "llm.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
108
+ "llm.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
109
+ "llm.model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
110
+ "llm.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
111
+ "llm.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
112
+ "llm.model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
113
+ "llm.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
114
+ "llm.model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
115
+ "llm.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
116
+ "llm.model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
117
+ "llm.model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
118
+ "llm.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
119
+ "llm.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
120
+ "llm.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
121
+ "llm.model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
122
+ "llm.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
123
+ "llm.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
124
+ "llm.model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
125
+ "llm.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
126
+ "llm.model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
127
+ "llm.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
128
+ "llm.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
129
+ "llm.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
130
+ "llm.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
131
+ "llm.model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
132
+ "llm.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
133
+ "llm.model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
134
+ "llm.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
135
+ "llm.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
136
+ "llm.model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
137
+ "llm.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
138
+ "llm.model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
139
+ "llm.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
140
+ "llm.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
141
+ "llm.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
142
+ "llm.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
143
+ "llm.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
144
+ "llm.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
145
+ "llm.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
146
+ "llm.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
147
+ "llm.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
148
+ "llm.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
149
+ "llm.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
150
+ "llm.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
151
+ "llm.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
152
+ "llm.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
153
+ "llm.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
154
+ "llm.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
155
+ "llm.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
156
+ "llm.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
157
+ "llm.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
158
+ "llm.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
159
+ "llm.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
160
+ "llm.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
161
+ "llm.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
162
+ "llm.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
163
+ "llm.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
164
+ "llm.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
165
+ "llm.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
166
+ "llm.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
167
+ "llm.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
168
+ "llm.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
169
+ "llm.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
170
+ "llm.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
171
+ "llm.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
172
+ "llm.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
173
+ "llm.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
174
+ "llm.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
175
+ "llm.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
176
+ "llm.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
177
+ "llm.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
178
+ "llm.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
179
+ "llm.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
180
+ "llm.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
181
+ "llm.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
182
+ "llm.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
183
+ "llm.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
184
+ "llm.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
185
+ "llm.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
186
+ "llm.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
187
+ "llm.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
188
+ "llm.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
189
+ "llm.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
190
+ "llm.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
191
+ "llm.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
192
+ "llm.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
193
+ "llm.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
194
+ "llm.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
195
+ "llm.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
196
+ "llm.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
197
+ "llm.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
198
+ "llm.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
199
+ "llm.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
200
+ "llm.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
201
+ "llm.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
202
+ "llm.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
203
+ "llm.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
204
+ "llm.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
205
+ "llm.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
206
+ "llm.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
207
+ "llm.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
208
+ "llm.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
209
+ "llm.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
210
+ "llm.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
211
+ "llm.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
212
+ "llm.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
213
+ "llm.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
214
+ "llm.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
215
+ "llm.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
216
+ "llm.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
217
+ "llm.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
218
+ "llm.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
219
+ "llm.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
220
+ "llm.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
221
+ "llm.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
222
+ "llm.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
223
+ "llm.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
224
+ "llm.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
225
+ "llm.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
226
+ "llm.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
227
+ "llm.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
228
+ "llm.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
229
+ "llm.model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
230
+ "llm.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
231
+ "llm.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
232
+ "llm.model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
233
+ "llm.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
234
+ "llm.model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
235
+ "llm.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
236
+ "llm.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
237
+ "llm.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
238
+ "llm.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
239
+ "llm.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
240
+ "llm.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
241
+ "llm.model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
242
+ "llm.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
243
+ "llm.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
244
+ "llm.model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
245
+ "llm.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
246
+ "llm.model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
247
+ "llm.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
248
+ "llm.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
249
+ "llm.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
250
+ "llm.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
251
+ "llm.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
252
+ "llm.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
253
+ "llm.model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
254
+ "llm.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
255
+ "llm.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
256
+ "llm.model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
257
+ "llm.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
258
+ "llm.model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
259
+ "llm.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
260
+ "llm.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
261
+ "llm.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
262
+ "llm.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
263
+ "llm.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
264
+ "llm.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
265
+ "llm.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
266
+ "llm.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
267
+ "llm.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
268
+ "llm.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
269
+ "llm.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
270
+ "llm.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
271
+ "llm.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
272
+ "llm.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
273
+ "llm.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
274
+ "llm.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
275
+ "llm.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
276
+ "llm.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
277
+ "llm.model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
278
+ "llm.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
279
+ "llm.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
280
+ "llm.model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
281
+ "llm.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
282
+ "llm.model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
283
+ "llm.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
284
+ "llm.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
285
+ "llm.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
286
+ "llm.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
287
+ "llm.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
288
+ "llm.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
289
+ "llm.model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
290
+ "llm.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
291
+ "llm.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
292
+ "llm.model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
293
+ "llm.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
294
+ "llm.model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
295
+ "llm.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
296
+ "llm.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
297
+ "llm.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
298
+ "llm.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
299
+ "llm.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
300
+ "llm.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
301
+ "llm.model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
302
+ "llm.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
303
+ "llm.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
304
+ "llm.model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
305
+ "llm.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
306
+ "llm.model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
307
+ "llm.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
308
+ "llm.model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
309
+ "llm.model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
310
+ "llm.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
311
+ "llm.model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
312
+ "llm.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
313
+ "llm.model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
314
+ "llm.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
315
+ "llm.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
316
+ "llm.model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
317
+ "llm.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
318
+ "llm.model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
319
+ "llm.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
320
+ "llm.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
321
+ "llm.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
322
+ "llm.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
323
+ "llm.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
324
+ "llm.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
325
+ "llm.model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
326
+ "llm.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
327
+ "llm.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
328
+ "llm.model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
329
+ "llm.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
330
+ "llm.model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
331
+ "llm.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
332
+ "llm.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
333
+ "llm.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
334
+ "llm.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
335
+ "llm.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
336
+ "llm.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
337
+ "llm.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
338
+ "llm.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
339
+ "llm.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
340
+ "llm.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
341
+ "llm.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
342
+ "llm.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
343
+ "llm.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
344
+ "llm.model.norm.weight": "model-00003-of-00004.safetensors",
345
+ "resampler.attn.in_proj_bias": "model-00004-of-00004.safetensors",
346
+ "resampler.attn.in_proj_weight": "model-00004-of-00004.safetensors",
347
+ "resampler.attn.out_proj.bias": "model-00004-of-00004.safetensors",
348
+ "resampler.attn.out_proj.weight": "model-00004-of-00004.safetensors",
349
+ "resampler.kv_proj.weight": "model-00004-of-00004.safetensors",
350
+ "resampler.ln_kv.bias": "model-00004-of-00004.safetensors",
351
+ "resampler.ln_kv.weight": "model-00004-of-00004.safetensors",
352
+ "resampler.ln_post.bias": "model-00004-of-00004.safetensors",
353
+ "resampler.ln_post.weight": "model-00004-of-00004.safetensors",
354
+ "resampler.ln_q.bias": "model-00004-of-00004.safetensors",
355
+ "resampler.ln_q.weight": "model-00004-of-00004.safetensors",
356
+ "resampler.proj": "model-00004-of-00004.safetensors",
357
+ "resampler.query": "model-00004-of-00004.safetensors",
358
+ "vpm.embeddings.patch_embedding.bias": "model-00004-of-00004.safetensors",
359
+ "vpm.embeddings.patch_embedding.weight": "model-00004-of-00004.safetensors",
360
+ "vpm.embeddings.position_embedding.weight": "model-00004-of-00004.safetensors",
361
+ "vpm.encoder.layers.0.layer_norm1.bias": "model-00004-of-00004.safetensors",
362
+ "vpm.encoder.layers.0.layer_norm1.weight": "model-00004-of-00004.safetensors",
363
+ "vpm.encoder.layers.0.layer_norm2.bias": "model-00004-of-00004.safetensors",
364
+ "vpm.encoder.layers.0.layer_norm2.weight": "model-00004-of-00004.safetensors",
365
+ "vpm.encoder.layers.0.mlp.fc1.bias": "model-00004-of-00004.safetensors",
366
+ "vpm.encoder.layers.0.mlp.fc1.weight": "model-00004-of-00004.safetensors",
367
+ "vpm.encoder.layers.0.mlp.fc2.bias": "model-00004-of-00004.safetensors",
368
+ "vpm.encoder.layers.0.mlp.fc2.weight": "model-00004-of-00004.safetensors",
369
+ "vpm.encoder.layers.0.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
370
+ "vpm.encoder.layers.0.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
371
+ "vpm.encoder.layers.0.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
372
+ "vpm.encoder.layers.0.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
373
+ "vpm.encoder.layers.0.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
374
+ "vpm.encoder.layers.0.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
375
+ "vpm.encoder.layers.0.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
376
+ "vpm.encoder.layers.0.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
377
+ "vpm.encoder.layers.1.layer_norm1.bias": "model-00004-of-00004.safetensors",
378
+ "vpm.encoder.layers.1.layer_norm1.weight": "model-00004-of-00004.safetensors",
379
+ "vpm.encoder.layers.1.layer_norm2.bias": "model-00004-of-00004.safetensors",
380
+ "vpm.encoder.layers.1.layer_norm2.weight": "model-00004-of-00004.safetensors",
381
+ "vpm.encoder.layers.1.mlp.fc1.bias": "model-00004-of-00004.safetensors",
382
+ "vpm.encoder.layers.1.mlp.fc1.weight": "model-00004-of-00004.safetensors",
383
+ "vpm.encoder.layers.1.mlp.fc2.bias": "model-00004-of-00004.safetensors",
384
+ "vpm.encoder.layers.1.mlp.fc2.weight": "model-00004-of-00004.safetensors",
385
+ "vpm.encoder.layers.1.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
386
+ "vpm.encoder.layers.1.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
387
+ "vpm.encoder.layers.1.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
388
+ "vpm.encoder.layers.1.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
389
+ "vpm.encoder.layers.1.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
390
+ "vpm.encoder.layers.1.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
391
+ "vpm.encoder.layers.1.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
392
+ "vpm.encoder.layers.1.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
393
+ "vpm.encoder.layers.10.layer_norm1.bias": "model-00004-of-00004.safetensors",
394
+ "vpm.encoder.layers.10.layer_norm1.weight": "model-00004-of-00004.safetensors",
395
+ "vpm.encoder.layers.10.layer_norm2.bias": "model-00004-of-00004.safetensors",
396
+ "vpm.encoder.layers.10.layer_norm2.weight": "model-00004-of-00004.safetensors",
397
+ "vpm.encoder.layers.10.mlp.fc1.bias": "model-00004-of-00004.safetensors",
398
+ "vpm.encoder.layers.10.mlp.fc1.weight": "model-00004-of-00004.safetensors",
399
+ "vpm.encoder.layers.10.mlp.fc2.bias": "model-00004-of-00004.safetensors",
400
+ "vpm.encoder.layers.10.mlp.fc2.weight": "model-00004-of-00004.safetensors",
401
+ "vpm.encoder.layers.10.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
402
+ "vpm.encoder.layers.10.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
403
+ "vpm.encoder.layers.10.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
404
+ "vpm.encoder.layers.10.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
405
+ "vpm.encoder.layers.10.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
406
+ "vpm.encoder.layers.10.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
407
+ "vpm.encoder.layers.10.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
408
+ "vpm.encoder.layers.10.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
409
+ "vpm.encoder.layers.11.layer_norm1.bias": "model-00004-of-00004.safetensors",
410
+ "vpm.encoder.layers.11.layer_norm1.weight": "model-00004-of-00004.safetensors",
411
+ "vpm.encoder.layers.11.layer_norm2.bias": "model-00004-of-00004.safetensors",
412
+ "vpm.encoder.layers.11.layer_norm2.weight": "model-00004-of-00004.safetensors",
413
+ "vpm.encoder.layers.11.mlp.fc1.bias": "model-00004-of-00004.safetensors",
414
+ "vpm.encoder.layers.11.mlp.fc1.weight": "model-00004-of-00004.safetensors",
415
+ "vpm.encoder.layers.11.mlp.fc2.bias": "model-00004-of-00004.safetensors",
416
+ "vpm.encoder.layers.11.mlp.fc2.weight": "model-00004-of-00004.safetensors",
417
+ "vpm.encoder.layers.11.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
418
+ "vpm.encoder.layers.11.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
419
+ "vpm.encoder.layers.11.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
420
+ "vpm.encoder.layers.11.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
421
+ "vpm.encoder.layers.11.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
422
+ "vpm.encoder.layers.11.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
423
+ "vpm.encoder.layers.11.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
424
+ "vpm.encoder.layers.11.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
425
+ "vpm.encoder.layers.12.layer_norm1.bias": "model-00004-of-00004.safetensors",
426
+ "vpm.encoder.layers.12.layer_norm1.weight": "model-00004-of-00004.safetensors",
427
+ "vpm.encoder.layers.12.layer_norm2.bias": "model-00004-of-00004.safetensors",
428
+ "vpm.encoder.layers.12.layer_norm2.weight": "model-00004-of-00004.safetensors",
429
+ "vpm.encoder.layers.12.mlp.fc1.bias": "model-00004-of-00004.safetensors",
430
+ "vpm.encoder.layers.12.mlp.fc1.weight": "model-00004-of-00004.safetensors",
431
+ "vpm.encoder.layers.12.mlp.fc2.bias": "model-00004-of-00004.safetensors",
432
+ "vpm.encoder.layers.12.mlp.fc2.weight": "model-00004-of-00004.safetensors",
433
+ "vpm.encoder.layers.12.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
434
+ "vpm.encoder.layers.12.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
435
+ "vpm.encoder.layers.12.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
436
+ "vpm.encoder.layers.12.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
437
+ "vpm.encoder.layers.12.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
438
+ "vpm.encoder.layers.12.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
439
+ "vpm.encoder.layers.12.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
440
+ "vpm.encoder.layers.12.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
441
+ "vpm.encoder.layers.13.layer_norm1.bias": "model-00004-of-00004.safetensors",
442
+ "vpm.encoder.layers.13.layer_norm1.weight": "model-00004-of-00004.safetensors",
443
+ "vpm.encoder.layers.13.layer_norm2.bias": "model-00004-of-00004.safetensors",
444
+ "vpm.encoder.layers.13.layer_norm2.weight": "model-00004-of-00004.safetensors",
445
+ "vpm.encoder.layers.13.mlp.fc1.bias": "model-00004-of-00004.safetensors",
446
+ "vpm.encoder.layers.13.mlp.fc1.weight": "model-00004-of-00004.safetensors",
447
+ "vpm.encoder.layers.13.mlp.fc2.bias": "model-00004-of-00004.safetensors",
448
+ "vpm.encoder.layers.13.mlp.fc2.weight": "model-00004-of-00004.safetensors",
449
+ "vpm.encoder.layers.13.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
450
+ "vpm.encoder.layers.13.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
451
+ "vpm.encoder.layers.13.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
452
+ "vpm.encoder.layers.13.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
453
+ "vpm.encoder.layers.13.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
454
+ "vpm.encoder.layers.13.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
455
+ "vpm.encoder.layers.13.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
456
+ "vpm.encoder.layers.13.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
457
+ "vpm.encoder.layers.14.layer_norm1.bias": "model-00004-of-00004.safetensors",
458
+ "vpm.encoder.layers.14.layer_norm1.weight": "model-00004-of-00004.safetensors",
459
+ "vpm.encoder.layers.14.layer_norm2.bias": "model-00004-of-00004.safetensors",
460
+ "vpm.encoder.layers.14.layer_norm2.weight": "model-00004-of-00004.safetensors",
461
+ "vpm.encoder.layers.14.mlp.fc1.bias": "model-00004-of-00004.safetensors",
462
+ "vpm.encoder.layers.14.mlp.fc1.weight": "model-00004-of-00004.safetensors",
463
+ "vpm.encoder.layers.14.mlp.fc2.bias": "model-00004-of-00004.safetensors",
464
+ "vpm.encoder.layers.14.mlp.fc2.weight": "model-00004-of-00004.safetensors",
465
+ "vpm.encoder.layers.14.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
466
+ "vpm.encoder.layers.14.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
467
+ "vpm.encoder.layers.14.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
468
+ "vpm.encoder.layers.14.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
469
+ "vpm.encoder.layers.14.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
470
+ "vpm.encoder.layers.14.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
471
+ "vpm.encoder.layers.14.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
472
+ "vpm.encoder.layers.14.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
473
+ "vpm.encoder.layers.15.layer_norm1.bias": "model-00004-of-00004.safetensors",
474
+ "vpm.encoder.layers.15.layer_norm1.weight": "model-00004-of-00004.safetensors",
475
+ "vpm.encoder.layers.15.layer_norm2.bias": "model-00004-of-00004.safetensors",
476
+ "vpm.encoder.layers.15.layer_norm2.weight": "model-00004-of-00004.safetensors",
477
+ "vpm.encoder.layers.15.mlp.fc1.bias": "model-00004-of-00004.safetensors",
478
+ "vpm.encoder.layers.15.mlp.fc1.weight": "model-00004-of-00004.safetensors",
479
+ "vpm.encoder.layers.15.mlp.fc2.bias": "model-00004-of-00004.safetensors",
480
+ "vpm.encoder.layers.15.mlp.fc2.weight": "model-00004-of-00004.safetensors",
481
+ "vpm.encoder.layers.15.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
482
+ "vpm.encoder.layers.15.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
483
+ "vpm.encoder.layers.15.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
484
+ "vpm.encoder.layers.15.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
485
+ "vpm.encoder.layers.15.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
486
+ "vpm.encoder.layers.15.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
487
+ "vpm.encoder.layers.15.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
488
+ "vpm.encoder.layers.15.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
489
+ "vpm.encoder.layers.16.layer_norm1.bias": "model-00004-of-00004.safetensors",
490
+ "vpm.encoder.layers.16.layer_norm1.weight": "model-00004-of-00004.safetensors",
491
+ "vpm.encoder.layers.16.layer_norm2.bias": "model-00004-of-00004.safetensors",
492
+ "vpm.encoder.layers.16.layer_norm2.weight": "model-00004-of-00004.safetensors",
493
+ "vpm.encoder.layers.16.mlp.fc1.bias": "model-00004-of-00004.safetensors",
494
+ "vpm.encoder.layers.16.mlp.fc1.weight": "model-00004-of-00004.safetensors",
495
+ "vpm.encoder.layers.16.mlp.fc2.bias": "model-00004-of-00004.safetensors",
496
+ "vpm.encoder.layers.16.mlp.fc2.weight": "model-00004-of-00004.safetensors",
497
+ "vpm.encoder.layers.16.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
498
+ "vpm.encoder.layers.16.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
499
+ "vpm.encoder.layers.16.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
500
+ "vpm.encoder.layers.16.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
501
+ "vpm.encoder.layers.16.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
502
+ "vpm.encoder.layers.16.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
503
+ "vpm.encoder.layers.16.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
504
+ "vpm.encoder.layers.16.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
505
+ "vpm.encoder.layers.17.layer_norm1.bias": "model-00004-of-00004.safetensors",
506
+ "vpm.encoder.layers.17.layer_norm1.weight": "model-00004-of-00004.safetensors",
507
+ "vpm.encoder.layers.17.layer_norm2.bias": "model-00004-of-00004.safetensors",
508
+ "vpm.encoder.layers.17.layer_norm2.weight": "model-00004-of-00004.safetensors",
509
+ "vpm.encoder.layers.17.mlp.fc1.bias": "model-00004-of-00004.safetensors",
510
+ "vpm.encoder.layers.17.mlp.fc1.weight": "model-00004-of-00004.safetensors",
511
+ "vpm.encoder.layers.17.mlp.fc2.bias": "model-00004-of-00004.safetensors",
512
+ "vpm.encoder.layers.17.mlp.fc2.weight": "model-00004-of-00004.safetensors",
513
+ "vpm.encoder.layers.17.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
514
+ "vpm.encoder.layers.17.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
515
+ "vpm.encoder.layers.17.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
516
+ "vpm.encoder.layers.17.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
517
+ "vpm.encoder.layers.17.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
518
+ "vpm.encoder.layers.17.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
519
+ "vpm.encoder.layers.17.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
520
+ "vpm.encoder.layers.17.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
521
+ "vpm.encoder.layers.18.layer_norm1.bias": "model-00004-of-00004.safetensors",
522
+ "vpm.encoder.layers.18.layer_norm1.weight": "model-00004-of-00004.safetensors",
523
+ "vpm.encoder.layers.18.layer_norm2.bias": "model-00004-of-00004.safetensors",
524
+ "vpm.encoder.layers.18.layer_norm2.weight": "model-00004-of-00004.safetensors",
525
+ "vpm.encoder.layers.18.mlp.fc1.bias": "model-00004-of-00004.safetensors",
526
+ "vpm.encoder.layers.18.mlp.fc1.weight": "model-00004-of-00004.safetensors",
527
+ "vpm.encoder.layers.18.mlp.fc2.bias": "model-00004-of-00004.safetensors",
528
+ "vpm.encoder.layers.18.mlp.fc2.weight": "model-00004-of-00004.safetensors",
529
+ "vpm.encoder.layers.18.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
530
+ "vpm.encoder.layers.18.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
531
+ "vpm.encoder.layers.18.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
532
+ "vpm.encoder.layers.18.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
533
+ "vpm.encoder.layers.18.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
534
+ "vpm.encoder.layers.18.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
535
+ "vpm.encoder.layers.18.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
536
+ "vpm.encoder.layers.18.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
537
+ "vpm.encoder.layers.19.layer_norm1.bias": "model-00004-of-00004.safetensors",
538
+ "vpm.encoder.layers.19.layer_norm1.weight": "model-00004-of-00004.safetensors",
539
+ "vpm.encoder.layers.19.layer_norm2.bias": "model-00004-of-00004.safetensors",
540
+ "vpm.encoder.layers.19.layer_norm2.weight": "model-00004-of-00004.safetensors",
541
+ "vpm.encoder.layers.19.mlp.fc1.bias": "model-00004-of-00004.safetensors",
542
+ "vpm.encoder.layers.19.mlp.fc1.weight": "model-00004-of-00004.safetensors",
543
+ "vpm.encoder.layers.19.mlp.fc2.bias": "model-00004-of-00004.safetensors",
544
+ "vpm.encoder.layers.19.mlp.fc2.weight": "model-00004-of-00004.safetensors",
545
+ "vpm.encoder.layers.19.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
546
+ "vpm.encoder.layers.19.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
547
+ "vpm.encoder.layers.19.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
548
+ "vpm.encoder.layers.19.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
549
+ "vpm.encoder.layers.19.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
550
+ "vpm.encoder.layers.19.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
551
+ "vpm.encoder.layers.19.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
552
+ "vpm.encoder.layers.19.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
553
+ "vpm.encoder.layers.2.layer_norm1.bias": "model-00004-of-00004.safetensors",
554
+ "vpm.encoder.layers.2.layer_norm1.weight": "model-00004-of-00004.safetensors",
555
+ "vpm.encoder.layers.2.layer_norm2.bias": "model-00004-of-00004.safetensors",
556
+ "vpm.encoder.layers.2.layer_norm2.weight": "model-00004-of-00004.safetensors",
557
+ "vpm.encoder.layers.2.mlp.fc1.bias": "model-00004-of-00004.safetensors",
558
+ "vpm.encoder.layers.2.mlp.fc1.weight": "model-00004-of-00004.safetensors",
559
+ "vpm.encoder.layers.2.mlp.fc2.bias": "model-00004-of-00004.safetensors",
560
+ "vpm.encoder.layers.2.mlp.fc2.weight": "model-00004-of-00004.safetensors",
561
+ "vpm.encoder.layers.2.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
562
+ "vpm.encoder.layers.2.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
563
+ "vpm.encoder.layers.2.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
564
+ "vpm.encoder.layers.2.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
565
+ "vpm.encoder.layers.2.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
566
+ "vpm.encoder.layers.2.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
567
+ "vpm.encoder.layers.2.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
568
+ "vpm.encoder.layers.2.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
569
+ "vpm.encoder.layers.20.layer_norm1.bias": "model-00004-of-00004.safetensors",
570
+ "vpm.encoder.layers.20.layer_norm1.weight": "model-00004-of-00004.safetensors",
571
+ "vpm.encoder.layers.20.layer_norm2.bias": "model-00004-of-00004.safetensors",
572
+ "vpm.encoder.layers.20.layer_norm2.weight": "model-00004-of-00004.safetensors",
573
+ "vpm.encoder.layers.20.mlp.fc1.bias": "model-00004-of-00004.safetensors",
574
+ "vpm.encoder.layers.20.mlp.fc1.weight": "model-00004-of-00004.safetensors",
575
+ "vpm.encoder.layers.20.mlp.fc2.bias": "model-00004-of-00004.safetensors",
576
+ "vpm.encoder.layers.20.mlp.fc2.weight": "model-00004-of-00004.safetensors",
577
+ "vpm.encoder.layers.20.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
578
+ "vpm.encoder.layers.20.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
579
+ "vpm.encoder.layers.20.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
580
+ "vpm.encoder.layers.20.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
581
+ "vpm.encoder.layers.20.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
582
+ "vpm.encoder.layers.20.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
583
+ "vpm.encoder.layers.20.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
584
+ "vpm.encoder.layers.20.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
585
+ "vpm.encoder.layers.21.layer_norm1.bias": "model-00004-of-00004.safetensors",
586
+ "vpm.encoder.layers.21.layer_norm1.weight": "model-00004-of-00004.safetensors",
587
+ "vpm.encoder.layers.21.layer_norm2.bias": "model-00004-of-00004.safetensors",
588
+ "vpm.encoder.layers.21.layer_norm2.weight": "model-00004-of-00004.safetensors",
589
+ "vpm.encoder.layers.21.mlp.fc1.bias": "model-00004-of-00004.safetensors",
590
+ "vpm.encoder.layers.21.mlp.fc1.weight": "model-00004-of-00004.safetensors",
591
+ "vpm.encoder.layers.21.mlp.fc2.bias": "model-00004-of-00004.safetensors",
592
+ "vpm.encoder.layers.21.mlp.fc2.weight": "model-00004-of-00004.safetensors",
593
+ "vpm.encoder.layers.21.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
594
+ "vpm.encoder.layers.21.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
595
+ "vpm.encoder.layers.21.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
596
+ "vpm.encoder.layers.21.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
597
+ "vpm.encoder.layers.21.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
598
+ "vpm.encoder.layers.21.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
599
+ "vpm.encoder.layers.21.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
600
+ "vpm.encoder.layers.21.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
601
+ "vpm.encoder.layers.22.layer_norm1.bias": "model-00004-of-00004.safetensors",
602
+ "vpm.encoder.layers.22.layer_norm1.weight": "model-00004-of-00004.safetensors",
603
+ "vpm.encoder.layers.22.layer_norm2.bias": "model-00004-of-00004.safetensors",
604
+ "vpm.encoder.layers.22.layer_norm2.weight": "model-00004-of-00004.safetensors",
605
+ "vpm.encoder.layers.22.mlp.fc1.bias": "model-00004-of-00004.safetensors",
606
+ "vpm.encoder.layers.22.mlp.fc1.weight": "model-00004-of-00004.safetensors",
607
+ "vpm.encoder.layers.22.mlp.fc2.bias": "model-00004-of-00004.safetensors",
608
+ "vpm.encoder.layers.22.mlp.fc2.weight": "model-00004-of-00004.safetensors",
609
+ "vpm.encoder.layers.22.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
610
+ "vpm.encoder.layers.22.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
611
+ "vpm.encoder.layers.22.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
612
+ "vpm.encoder.layers.22.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
613
+ "vpm.encoder.layers.22.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
614
+ "vpm.encoder.layers.22.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
615
+ "vpm.encoder.layers.22.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
616
+ "vpm.encoder.layers.22.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
617
+ "vpm.encoder.layers.23.layer_norm1.bias": "model-00004-of-00004.safetensors",
618
+ "vpm.encoder.layers.23.layer_norm1.weight": "model-00004-of-00004.safetensors",
619
+ "vpm.encoder.layers.23.layer_norm2.bias": "model-00004-of-00004.safetensors",
620
+ "vpm.encoder.layers.23.layer_norm2.weight": "model-00004-of-00004.safetensors",
621
+ "vpm.encoder.layers.23.mlp.fc1.bias": "model-00004-of-00004.safetensors",
622
+ "vpm.encoder.layers.23.mlp.fc1.weight": "model-00004-of-00004.safetensors",
623
+ "vpm.encoder.layers.23.mlp.fc2.bias": "model-00004-of-00004.safetensors",
624
+ "vpm.encoder.layers.23.mlp.fc2.weight": "model-00004-of-00004.safetensors",
625
+ "vpm.encoder.layers.23.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
626
+ "vpm.encoder.layers.23.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
627
+ "vpm.encoder.layers.23.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
628
+ "vpm.encoder.layers.23.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
629
+ "vpm.encoder.layers.23.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
630
+ "vpm.encoder.layers.23.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
631
+ "vpm.encoder.layers.23.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
632
+ "vpm.encoder.layers.23.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
633
+ "vpm.encoder.layers.24.layer_norm1.bias": "model-00004-of-00004.safetensors",
634
+ "vpm.encoder.layers.24.layer_norm1.weight": "model-00004-of-00004.safetensors",
635
+ "vpm.encoder.layers.24.layer_norm2.bias": "model-00004-of-00004.safetensors",
636
+ "vpm.encoder.layers.24.layer_norm2.weight": "model-00004-of-00004.safetensors",
637
+ "vpm.encoder.layers.24.mlp.fc1.bias": "model-00004-of-00004.safetensors",
638
+ "vpm.encoder.layers.24.mlp.fc1.weight": "model-00004-of-00004.safetensors",
639
+ "vpm.encoder.layers.24.mlp.fc2.bias": "model-00004-of-00004.safetensors",
640
+ "vpm.encoder.layers.24.mlp.fc2.weight": "model-00004-of-00004.safetensors",
641
+ "vpm.encoder.layers.24.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
642
+ "vpm.encoder.layers.24.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
643
+ "vpm.encoder.layers.24.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
644
+ "vpm.encoder.layers.24.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
645
+ "vpm.encoder.layers.24.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
646
+ "vpm.encoder.layers.24.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
647
+ "vpm.encoder.layers.24.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
648
+ "vpm.encoder.layers.24.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
649
+ "vpm.encoder.layers.25.layer_norm1.bias": "model-00004-of-00004.safetensors",
650
+ "vpm.encoder.layers.25.layer_norm1.weight": "model-00004-of-00004.safetensors",
651
+ "vpm.encoder.layers.25.layer_norm2.bias": "model-00004-of-00004.safetensors",
652
+ "vpm.encoder.layers.25.layer_norm2.weight": "model-00004-of-00004.safetensors",
653
+ "vpm.encoder.layers.25.mlp.fc1.bias": "model-00004-of-00004.safetensors",
654
+ "vpm.encoder.layers.25.mlp.fc1.weight": "model-00004-of-00004.safetensors",
655
+ "vpm.encoder.layers.25.mlp.fc2.bias": "model-00004-of-00004.safetensors",
656
+ "vpm.encoder.layers.25.mlp.fc2.weight": "model-00004-of-00004.safetensors",
657
+ "vpm.encoder.layers.25.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
658
+ "vpm.encoder.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
659
+ "vpm.encoder.layers.25.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
660
+ "vpm.encoder.layers.25.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
661
+ "vpm.encoder.layers.25.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
662
+ "vpm.encoder.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
663
+ "vpm.encoder.layers.25.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
664
+ "vpm.encoder.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
665
+ "vpm.encoder.layers.26.layer_norm1.bias": "model-00004-of-00004.safetensors",
666
+ "vpm.encoder.layers.26.layer_norm1.weight": "model-00004-of-00004.safetensors",
667
+ "vpm.encoder.layers.26.layer_norm2.bias": "model-00004-of-00004.safetensors",
668
+ "vpm.encoder.layers.26.layer_norm2.weight": "model-00004-of-00004.safetensors",
669
+ "vpm.encoder.layers.26.mlp.fc1.bias": "model-00004-of-00004.safetensors",
670
+ "vpm.encoder.layers.26.mlp.fc1.weight": "model-00004-of-00004.safetensors",
671
+ "vpm.encoder.layers.26.mlp.fc2.bias": "model-00004-of-00004.safetensors",
672
+ "vpm.encoder.layers.26.mlp.fc2.weight": "model-00004-of-00004.safetensors",
673
+ "vpm.encoder.layers.26.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
674
+ "vpm.encoder.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
675
+ "vpm.encoder.layers.26.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
676
+ "vpm.encoder.layers.26.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
677
+ "vpm.encoder.layers.26.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
678
+ "vpm.encoder.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
679
+ "vpm.encoder.layers.26.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
680
+ "vpm.encoder.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
681
+ "vpm.encoder.layers.3.layer_norm1.bias": "model-00004-of-00004.safetensors",
682
+ "vpm.encoder.layers.3.layer_norm1.weight": "model-00004-of-00004.safetensors",
683
+ "vpm.encoder.layers.3.layer_norm2.bias": "model-00004-of-00004.safetensors",
684
+ "vpm.encoder.layers.3.layer_norm2.weight": "model-00004-of-00004.safetensors",
685
+ "vpm.encoder.layers.3.mlp.fc1.bias": "model-00004-of-00004.safetensors",
686
+ "vpm.encoder.layers.3.mlp.fc1.weight": "model-00004-of-00004.safetensors",
687
+ "vpm.encoder.layers.3.mlp.fc2.bias": "model-00004-of-00004.safetensors",
688
+ "vpm.encoder.layers.3.mlp.fc2.weight": "model-00004-of-00004.safetensors",
689
+ "vpm.encoder.layers.3.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
690
+ "vpm.encoder.layers.3.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
691
+ "vpm.encoder.layers.3.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
692
+ "vpm.encoder.layers.3.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
693
+ "vpm.encoder.layers.3.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
694
+ "vpm.encoder.layers.3.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
695
+ "vpm.encoder.layers.3.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
696
+ "vpm.encoder.layers.3.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
697
+ "vpm.encoder.layers.4.layer_norm1.bias": "model-00004-of-00004.safetensors",
698
+ "vpm.encoder.layers.4.layer_norm1.weight": "model-00004-of-00004.safetensors",
699
+ "vpm.encoder.layers.4.layer_norm2.bias": "model-00004-of-00004.safetensors",
700
+ "vpm.encoder.layers.4.layer_norm2.weight": "model-00004-of-00004.safetensors",
701
+ "vpm.encoder.layers.4.mlp.fc1.bias": "model-00004-of-00004.safetensors",
702
+ "vpm.encoder.layers.4.mlp.fc1.weight": "model-00004-of-00004.safetensors",
703
+ "vpm.encoder.layers.4.mlp.fc2.bias": "model-00004-of-00004.safetensors",
704
+ "vpm.encoder.layers.4.mlp.fc2.weight": "model-00004-of-00004.safetensors",
705
+ "vpm.encoder.layers.4.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
706
+ "vpm.encoder.layers.4.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
707
+ "vpm.encoder.layers.4.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
708
+ "vpm.encoder.layers.4.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
709
+ "vpm.encoder.layers.4.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
710
+ "vpm.encoder.layers.4.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
711
+ "vpm.encoder.layers.4.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
712
+ "vpm.encoder.layers.4.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
713
+ "vpm.encoder.layers.5.layer_norm1.bias": "model-00004-of-00004.safetensors",
714
+ "vpm.encoder.layers.5.layer_norm1.weight": "model-00004-of-00004.safetensors",
715
+ "vpm.encoder.layers.5.layer_norm2.bias": "model-00004-of-00004.safetensors",
716
+ "vpm.encoder.layers.5.layer_norm2.weight": "model-00004-of-00004.safetensors",
717
+ "vpm.encoder.layers.5.mlp.fc1.bias": "model-00004-of-00004.safetensors",
718
+ "vpm.encoder.layers.5.mlp.fc1.weight": "model-00004-of-00004.safetensors",
719
+ "vpm.encoder.layers.5.mlp.fc2.bias": "model-00004-of-00004.safetensors",
720
+ "vpm.encoder.layers.5.mlp.fc2.weight": "model-00004-of-00004.safetensors",
721
+ "vpm.encoder.layers.5.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
722
+ "vpm.encoder.layers.5.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
723
+ "vpm.encoder.layers.5.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
724
+ "vpm.encoder.layers.5.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
725
+ "vpm.encoder.layers.5.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
726
+ "vpm.encoder.layers.5.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
727
+ "vpm.encoder.layers.5.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
728
+ "vpm.encoder.layers.5.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
729
+ "vpm.encoder.layers.6.layer_norm1.bias": "model-00004-of-00004.safetensors",
730
+ "vpm.encoder.layers.6.layer_norm1.weight": "model-00004-of-00004.safetensors",
731
+ "vpm.encoder.layers.6.layer_norm2.bias": "model-00004-of-00004.safetensors",
732
+ "vpm.encoder.layers.6.layer_norm2.weight": "model-00004-of-00004.safetensors",
733
+ "vpm.encoder.layers.6.mlp.fc1.bias": "model-00004-of-00004.safetensors",
734
+ "vpm.encoder.layers.6.mlp.fc1.weight": "model-00004-of-00004.safetensors",
735
+ "vpm.encoder.layers.6.mlp.fc2.bias": "model-00004-of-00004.safetensors",
736
+ "vpm.encoder.layers.6.mlp.fc2.weight": "model-00004-of-00004.safetensors",
737
+ "vpm.encoder.layers.6.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
738
+ "vpm.encoder.layers.6.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
739
+ "vpm.encoder.layers.6.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
740
+ "vpm.encoder.layers.6.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
741
+ "vpm.encoder.layers.6.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
742
+ "vpm.encoder.layers.6.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
743
+ "vpm.encoder.layers.6.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
744
+ "vpm.encoder.layers.6.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
745
+ "vpm.encoder.layers.7.layer_norm1.bias": "model-00004-of-00004.safetensors",
746
+ "vpm.encoder.layers.7.layer_norm1.weight": "model-00004-of-00004.safetensors",
747
+ "vpm.encoder.layers.7.layer_norm2.bias": "model-00004-of-00004.safetensors",
748
+ "vpm.encoder.layers.7.layer_norm2.weight": "model-00004-of-00004.safetensors",
749
+ "vpm.encoder.layers.7.mlp.fc1.bias": "model-00004-of-00004.safetensors",
750
+ "vpm.encoder.layers.7.mlp.fc1.weight": "model-00004-of-00004.safetensors",
751
+ "vpm.encoder.layers.7.mlp.fc2.bias": "model-00004-of-00004.safetensors",
752
+ "vpm.encoder.layers.7.mlp.fc2.weight": "model-00004-of-00004.safetensors",
753
+ "vpm.encoder.layers.7.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
754
+ "vpm.encoder.layers.7.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
755
+ "vpm.encoder.layers.7.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
756
+ "vpm.encoder.layers.7.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
757
+ "vpm.encoder.layers.7.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
758
+ "vpm.encoder.layers.7.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
759
+ "vpm.encoder.layers.7.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
760
+ "vpm.encoder.layers.7.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
761
+ "vpm.encoder.layers.8.layer_norm1.bias": "model-00004-of-00004.safetensors",
762
+ "vpm.encoder.layers.8.layer_norm1.weight": "model-00004-of-00004.safetensors",
763
+ "vpm.encoder.layers.8.layer_norm2.bias": "model-00004-of-00004.safetensors",
764
+ "vpm.encoder.layers.8.layer_norm2.weight": "model-00004-of-00004.safetensors",
765
+ "vpm.encoder.layers.8.mlp.fc1.bias": "model-00004-of-00004.safetensors",
766
+ "vpm.encoder.layers.8.mlp.fc1.weight": "model-00004-of-00004.safetensors",
767
+ "vpm.encoder.layers.8.mlp.fc2.bias": "model-00004-of-00004.safetensors",
768
+ "vpm.encoder.layers.8.mlp.fc2.weight": "model-00004-of-00004.safetensors",
769
+ "vpm.encoder.layers.8.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
770
+ "vpm.encoder.layers.8.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
771
+ "vpm.encoder.layers.8.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
772
+ "vpm.encoder.layers.8.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
773
+ "vpm.encoder.layers.8.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
774
+ "vpm.encoder.layers.8.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
775
+ "vpm.encoder.layers.8.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
776
+ "vpm.encoder.layers.8.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
777
+ "vpm.encoder.layers.9.layer_norm1.bias": "model-00004-of-00004.safetensors",
778
+ "vpm.encoder.layers.9.layer_norm1.weight": "model-00004-of-00004.safetensors",
779
+ "vpm.encoder.layers.9.layer_norm2.bias": "model-00004-of-00004.safetensors",
780
+ "vpm.encoder.layers.9.layer_norm2.weight": "model-00004-of-00004.safetensors",
781
+ "vpm.encoder.layers.9.mlp.fc1.bias": "model-00004-of-00004.safetensors",
782
+ "vpm.encoder.layers.9.mlp.fc1.weight": "model-00004-of-00004.safetensors",
783
+ "vpm.encoder.layers.9.mlp.fc2.bias": "model-00004-of-00004.safetensors",
784
+ "vpm.encoder.layers.9.mlp.fc2.weight": "model-00004-of-00004.safetensors",
785
+ "vpm.encoder.layers.9.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
786
+ "vpm.encoder.layers.9.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
787
+ "vpm.encoder.layers.9.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
788
+ "vpm.encoder.layers.9.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
789
+ "vpm.encoder.layers.9.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
790
+ "vpm.encoder.layers.9.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
791
+ "vpm.encoder.layers.9.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
792
+ "vpm.encoder.layers.9.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
793
+ "vpm.post_layernorm.bias": "model-00004-of-00004.safetensors",
794
+ "vpm.post_layernorm.weight": "model-00004-of-00004.safetensors"
795
+ }
796
+ }
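
The weight map above assigns each remaining `vpm.*` vision-tower parameter to the shard `model-00004-of-00004.safetensors`; loaders read this index first and then open only the shards they need. Below is a minimal sketch of resolving a single tensor through the index, assuming a local copy of this checkpoint directory; the path and the chosen parameter name are illustrative.

import json
import os

from safetensors import safe_open

ckpt_dir = "checkpoint-5000"  # assumed local path to this checkpoint
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "vpm.post_layernorm.weight"
shard = index["weight_map"][name]  # resolves to "model-00004-of-00004.safetensors"
with safe_open(os.path.join(ckpt_dir, shard), framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)
print(name, shard, tuple(tensor.shape))
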
checkpoint-5000/modeling_minicpmv.py ADDED
@@ -0,0 +1,403 @@
1
+ import math
2
+ from typing import List, Optional
3
+ import json
4
+ import torch
5
+ import torchvision
6
+
7
+ from threading import Thread
8
+ from copy import deepcopy
9
+ from PIL import Image
10
+ from transformers import AutoProcessor, Qwen2PreTrainedModel, Qwen2ForCausalLM, TextIteratorStreamer
11
+
12
+ from .configuration_minicpm import MiniCPMVConfig
13
+ from .modeling_navit_siglip import SiglipVisionTransformer
14
+ from .resampler import Resampler
15
+
16
+
17
+
18
+ class MiniCPMVPreTrainedModel(Qwen2PreTrainedModel):
19
+ config_class = MiniCPMVConfig
20
+
21
+
22
+ class MiniCPMV(MiniCPMVPreTrainedModel):
23
+ def __init__(self, config):
24
+ super().__init__(config)
25
+ self.llm = Qwen2ForCausalLM(config)
26
+ self.vpm = self.init_vision_module()
27
+ self.vision_dim = self.vpm.embed_dim
28
+ self.embed_dim = self.llm.config.hidden_size
29
+ self.resampler = self.init_resampler(self.embed_dim, self.vision_dim)
30
+ self.processor = None
31
+
32
+ self.terminators = ['<|im_end|>', '<|endoftext|>']
33
+
34
+ def init_vision_module(self):
35
+ # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added
36
+ if self.config._attn_implementation == 'flash_attention_2':
37
+ self.config.vision_config._attn_implementation = 'flash_attention_2'
38
+ else:
39
+ # sdpa is not supported
40
+ self.config.vision_config._attn_implementation = 'eager'
41
+ model = SiglipVisionTransformer(self.config.vision_config)
42
+ if self.config.drop_vision_last_layer:
43
+ model.encoder.layers = model.encoder.layers[:-1]
44
+
45
+ setattr(model, 'embed_dim', model.embeddings.embed_dim)
46
+ setattr(model, 'patch_size', model.embeddings.patch_size)
47
+
48
+ return model
49
+
50
+ def init_resampler(self, embed_dim, vision_dim):
51
+ return Resampler(
52
+ num_queries=self.config.query_num,
53
+ embed_dim=embed_dim,
54
+ num_heads=embed_dim // 128,
55
+ kv_dim=vision_dim,
56
+ adaptive=True
57
+ )
58
+
59
+ def get_input_embeddings(self):
60
+ return self.llm.get_input_embeddings()
61
+
62
+ def set_input_embeddings(self, value):
63
+ self.llm.embed_tokens = value
64
+
65
+ def get_output_embeddings(self):
66
+ return self.llm.lm_head
67
+
68
+ def set_output_embeddings(self, new_embeddings):
69
+ self.llm.lm_head = new_embeddings
70
+
71
+ def set_decoder(self, decoder):
72
+ self.llm = decoder
73
+
74
+ def get_decoder(self):
75
+ return self.llm
76
+
77
+ def get_vllm_embedding(self, data):
78
+ if 'vision_hidden_states' not in data:
79
+ dtype = self.llm.model.embed_tokens.weight.dtype
80
+ device = self.llm.model.embed_tokens.weight.device
81
+ tgt_sizes = data['tgt_sizes']
82
+ pixel_values_list = data['pixel_values']
83
+ vision_hidden_states = []
84
+ all_pixel_values = []
85
+ img_cnt = []
86
+ for pixel_values in pixel_values_list:
87
+ img_cnt.append(len(pixel_values))
88
+ all_pixel_values.extend([i.flatten(end_dim=1).permute(1, 0) for i in pixel_values])
89
+
90
+ # exist image
91
+ if all_pixel_values:
92
+ tgt_sizes = [tgt_size for tgt_size in tgt_sizes if isinstance(tgt_size, torch.Tensor)]
93
+ tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)
94
+
95
+ max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])
96
+
97
+ all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
98
+ padding_value=0.0)
99
+ B, L, _ = all_pixel_values.shape
100
+ all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
101
+
102
+ patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
103
+ for i in range(B):
104
+ patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True
105
+
106
+ vision_batch_size = self.config.vision_batch_size
107
+ all_pixel_values = all_pixel_values.type(dtype)
108
+ if B > vision_batch_size:
109
+ hs = []
110
+ for i in range(0, B, vision_batch_size):
111
+ start_idx = i
112
+ end_idx = i + vision_batch_size
113
+ tmp_hs = self.vpm(all_pixel_values[start_idx:end_idx], patch_attention_mask=patch_attn_mask[start_idx:end_idx], tgt_sizes=tgt_sizes[start_idx:end_idx]).last_hidden_state
114
+ hs.append(tmp_hs)
115
+ vision_embedding = torch.cat(hs, dim=0)
116
+ else:
117
+ vision_embedding = self.vpm(all_pixel_values, patch_attention_mask=patch_attn_mask, tgt_sizes=tgt_sizes).last_hidden_state
118
+ vision_embedding = self.resampler(vision_embedding, tgt_sizes)
119
+
120
+ start = 0
121
+ for pixel_values in pixel_values_list:
122
+ img_cnt = len(pixel_values)
123
+ if img_cnt > 0:
124
+ vision_hidden_states.append(vision_embedding[start: start + img_cnt])
125
+ start += img_cnt
126
+ else:
127
+ vision_hidden_states.append([])
128
+ else: # no image
129
+ if self.training:
130
+ dummy_image = torch.zeros(
131
+ (1, 3, 224, 224),
132
+ device=device, dtype=dtype
133
+ )
134
+ tgt_sizes = torch.Tensor([[(224 // self.config.patch_size), math.ceil(224 / self.config.patch_size)]]).type(torch.int32)
135
+ dummy_feature = self.resampler(self.vpm(dummy_image).last_hidden_state, tgt_sizes)
136
+ else:
137
+ dummy_feature = []
138
+ for _ in range(len(pixel_values_list)):
139
+ vision_hidden_states.append(dummy_feature)
140
+
141
+ else:
142
+ vision_hidden_states = data['vision_hidden_states']
143
+
144
+ if hasattr(self.llm.config, 'scale_emb'):
145
+ vllm_embedding = self.llm.model.embed_tokens(data['input_ids']) * self.llm.config.scale_emb
146
+ else:
147
+ vllm_embedding = self.llm.model.embed_tokens(data['input_ids'])
148
+
149
+ vision_hidden_states = [i.type(vllm_embedding.dtype) if isinstance(
150
+ i, torch.Tensor) else i for i in vision_hidden_states]
151
+
152
+ bs = len(data['input_ids'])
153
+ for i in range(bs):
154
+ cur_vs_hs = vision_hidden_states[i]
155
+ if len(cur_vs_hs) > 0:
156
+ cur_vllm_emb = vllm_embedding[i]
157
+ cur_image_bound = data['image_bound'][i]
158
+ if len(cur_image_bound) > 0:
159
+ image_indices = torch.stack(
160
+ [torch.arange(r[0], r[1], dtype=torch.long) for r in cur_image_bound]
161
+ ).to(vllm_embedding.device)
162
+
163
+ cur_vllm_emb.scatter_(0, image_indices.view(-1, 1).repeat(1, cur_vllm_emb.shape[-1]),
164
+ cur_vs_hs.view(-1, cur_vs_hs.shape[-1]))
165
+ elif self.training:
166
+ cur_vllm_emb += cur_vs_hs[0].mean() * 0
167
+
168
+ return vllm_embedding, vision_hidden_states
169
+
170
+ def forward(self, data, **kwargs):
171
+ vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
172
+ position_ids = data["position_ids"]
173
+ if position_ids.dtype != torch.int64:
174
+ position_ids = position_ids.long()
175
+
176
+ return self.llm(
177
+ input_ids=None,
178
+ position_ids=position_ids,
179
+ inputs_embeds=vllm_embedding,
180
+ **kwargs
181
+ )
182
+
183
+ def _decode(self, inputs_embeds, tokenizer, attention_mask, decode_text=False, **kwargs):
184
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
185
+ output = self.llm.generate(
186
+ inputs_embeds=inputs_embeds,
187
+ pad_token_id=0,
188
+ eos_token_id=terminators,
189
+ attention_mask=attention_mask,
190
+ **kwargs
191
+ )
192
+ if decode_text:
193
+ return self._decode_text(output, tokenizer)
194
+ return output
195
+
196
+ def _decode_stream(self, inputs_embeds, tokenizer, **kwargs):
197
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
198
+ streamer = TextIteratorStreamer(tokenizer=tokenizer)
199
+ generation_kwargs = {
200
+ 'inputs_embeds': inputs_embeds,
201
+ 'pad_token_id': 0,
202
+ 'eos_token_id': terminators,
203
+ 'streamer': streamer
204
+ }
205
+ generation_kwargs.update(kwargs)
206
+
207
+ thread = Thread(target=self.llm.generate, kwargs=generation_kwargs)
208
+ thread.start()
209
+
210
+ return streamer
211
+
212
+ def _decode_text(self, result_ids, tokenizer):
213
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
214
+ result_text = []
215
+ for result in result_ids:
216
+ result = result[result != 0]
217
+ if result[0] == tokenizer.bos_id:
218
+ result = result[1:]
219
+ if result[-1] in terminators:
220
+ result = result[:-1]
221
+ result_text.append(tokenizer.decode(result).strip())
222
+ return result_text
223
+
224
+ def generate(
225
+ self,
226
+ input_ids=None,
227
+ pixel_values=None,
228
+ tgt_sizes=None,
229
+ image_bound=None,
230
+ attention_mask=None,
231
+ tokenizer=None,
232
+ vision_hidden_states=None,
233
+ return_vision_hidden_states=False,
234
+ stream=False,
235
+ decode_text=False,
236
+ **kwargs
237
+ ):
238
+ assert input_ids is not None
239
+ assert len(input_ids) == len(pixel_values)
240
+
241
+ model_inputs = {
242
+ "input_ids": input_ids,
243
+ "image_bound": image_bound,
244
+ }
245
+
246
+ if vision_hidden_states is None:
247
+ model_inputs["pixel_values"] = pixel_values
248
+ model_inputs['tgt_sizes'] = tgt_sizes
249
+ else:
250
+ model_inputs["vision_hidden_states"] = vision_hidden_states
251
+
252
+ with torch.inference_mode():
253
+ (
254
+ model_inputs["inputs_embeds"],
255
+ vision_hidden_states,
256
+ ) = self.get_vllm_embedding(model_inputs)
257
+
258
+ if stream:
259
+ result = self._decode_stream(model_inputs["inputs_embeds"], tokenizer, **kwargs)
260
+ else:
261
+ result = self._decode(model_inputs["inputs_embeds"], tokenizer, attention_mask, decode_text=decode_text, **kwargs)
262
+
263
+ if return_vision_hidden_states:
264
+ return result, vision_hidden_states
265
+
266
+ return result
267
+
268
+ def chat(
269
+ self,
270
+ image,
271
+ msgs,
272
+ tokenizer,
273
+ processor=None,
274
+ vision_hidden_states=None,
275
+ max_new_tokens=2048,
276
+ min_new_tokens=0,
277
+ sampling=True,
278
+ max_inp_length=8192,
279
+ system_prompt='',
280
+ stream=False,
281
+ max_slice_nums=None,
282
+ use_image_id=None,
283
+ **kwargs
284
+ ):
285
+ if isinstance(msgs[0], list):
286
+ batched = True
287
+ else:
288
+ batched = False
289
+ msgs_list = msgs
290
+ images_list = image
291
+
292
+ if batched is False:
293
+ images_list, msgs_list = [images_list], [msgs_list]
294
+ else:
295
+ assert images_list is None, "Please include images in msgs when using batch inference."
296
+ images_list = [None] * len(msgs_list)
297
+ assert len(images_list) == len(msgs_list), "The batch dim of images_list and msgs_list should be the same."
298
+
299
+ if processor is None:
300
+ if self.processor is None:
301
+ self.processor = AutoProcessor.from_pretrained(self.config._name_or_path, trust_remote_code=True)
302
+ processor = self.processor
303
+
304
+ assert self.config.query_num == processor.image_processor.image_feature_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
305
+ assert self.config.patch_size == processor.image_processor.patch_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
306
+ assert self.config.use_image_id == processor.image_processor.use_image_id, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
307
+ assert self.config.slice_config.max_slice_nums == processor.image_processor.max_slice_nums, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
308
+ assert self.config.slice_mode == processor.image_processor.slice_mode, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
309
+
310
+ prompts_lists = []
311
+ input_images_lists = []
312
+ for image, msgs in zip(images_list, msgs_list):
313
+ if isinstance(msgs, str):
314
+ msgs = json.loads(msgs)
315
+ copy_msgs = deepcopy(msgs)
316
+
317
+ assert len(msgs) > 0, "msgs is empty"
318
+ assert sampling or not stream, "if use stream mode, make sure sampling=True"
319
+
320
+ if image is not None and isinstance(copy_msgs[0]["content"], str):
321
+ copy_msgs[0]["content"] = [image, copy_msgs[0]["content"]]
322
+
323
+ images = []
324
+ for i, msg in enumerate(copy_msgs):
325
+ role = msg["role"]
326
+ content = msg["content"]
327
+ assert role in ["user", "assistant"]
328
+ if i == 0:
329
+ assert role == "user", "The role of first msg should be user"
330
+ if isinstance(content, str):
331
+ content = [content]
332
+ cur_msgs = []
333
+ for c in content:
334
+ if isinstance(c, Image.Image):
335
+ images.append(c)
336
+ cur_msgs.append("(<image>./</image>)")
337
+ elif isinstance(c, str):
338
+ cur_msgs.append(c)
339
+ msg["content"] = "\n".join(cur_msgs)
340
+
341
+ if system_prompt:
342
+ sys_msg = {'role': 'system', 'content': system_prompt}
343
+ copy_msgs = [sys_msg] + copy_msgs
344
+
345
+ prompts_lists.append(processor.tokenizer.apply_chat_template(copy_msgs, tokenize=False, add_generation_prompt=True))
346
+ input_images_lists.append(images)
347
+
348
+ inputs = processor(
349
+ prompts_lists,
350
+ input_images_lists,
351
+ max_slice_nums=max_slice_nums,
352
+ use_image_id=use_image_id,
353
+ return_tensors="pt",
354
+ max_length=max_inp_length
355
+ ).to(self.device)
356
+
357
+ if sampling:
358
+ generation_config = {
359
+ "top_p": 0.8,
360
+ "top_k": 100,
361
+ "temperature": 0.7,
362
+ "do_sample": True,
363
+ "repetition_penalty": 1.05
364
+ }
365
+ else:
366
+ generation_config = {
367
+ "num_beams": 3,
368
+ "repetition_penalty": 1.2,
369
+ }
370
+
371
+ if min_new_tokens > 0:
372
+ generation_config['min_new_tokens'] = min_new_tokens
373
+
374
+ generation_config.update(
375
+ (k, kwargs[k]) for k in generation_config.keys() & kwargs.keys()
376
+ )
377
+
378
+ inputs.pop("image_sizes")
379
+ with torch.inference_mode():
380
+ res = self.generate(
381
+ **inputs,
382
+ tokenizer=tokenizer,
383
+ max_new_tokens=max_new_tokens,
384
+ vision_hidden_states=vision_hidden_states,
385
+ stream=stream,
386
+ decode_text=True,
387
+ **generation_config
388
+ )
389
+
390
+ if stream:
391
+ def stream_gen():
392
+ for text in res:
393
+ for term in self.terminators:
394
+ text = text.replace(term, '')
395
+ yield text
396
+ return stream_gen()
397
+
398
+ else:
399
+ if batched:
400
+ answer = res
401
+ else:
402
+ answer = res[0]
403
+ return answer
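
A minimal usage sketch of the `chat` API defined above, which assembles the prompt (inserting the `(<image>./</image>)` placeholder for each image), runs the bundled processor, and generates. It assumes the checkpoint is loaded with `trust_remote_code=True` so that this `modeling_minicpmv.py` is picked up; the local path, image file and prompt are illustrative.

import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

path = "checkpoint-5000"  # assumed local path to this checkpoint
model = AutoModel.from_pretrained(path, trust_remote_code=True,
                                  torch_dtype=torch.bfloat16).eval()
model = model.cuda()  # assumes a CUDA device
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

image = Image.open("example.png").convert("RGB")  # illustrative image file
msgs = [{"role": "user", "content": "Describe this image."}]

# sampling=False takes the beam-search generation_config branch defined above
answer = model.chat(image=image, msgs=msgs, tokenizer=tokenizer, sampling=False)
print(answer)

With `stream=True` (and sampling enabled) `chat` instead returns a generator that yields partial text, as implemented by `stream_gen` above.
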
checkpoint-5000/modeling_navit_siglip.py ADDED
@@ -0,0 +1,937 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Siglip model. """
16
+ # Copied from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added
17
+
18
+
19
+ import os
20
+ import math
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Any, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn.init import _calculate_fan_in_and_fan_out
31
+
32
+ from transformers.activations import ACT2FN
33
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
34
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.configuration_utils import PretrainedConfig
37
+ from transformers.utils import (
38
+ ModelOutput,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ is_flash_attn_2_available,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from transformers.utils import logging
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ class SiglipVisionConfig(PretrainedConfig):
50
+ r"""
51
+ This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
52
+ Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
53
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
54
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
55
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
56
+ documentation from [`PretrainedConfig`] for more information.
57
+ Args:
58
+ hidden_size (`int`, *optional*, defaults to 768):
59
+ Dimensionality of the encoder layers and the pooler layer.
60
+ intermediate_size (`int`, *optional*, defaults to 3072):
61
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
62
+ num_hidden_layers (`int`, *optional*, defaults to 12):
63
+ Number of hidden layers in the Transformer encoder.
64
+ num_attention_heads (`int`, *optional*, defaults to 12):
65
+ Number of attention heads for each attention layer in the Transformer encoder.
66
+ num_channels (`int`, *optional*, defaults to 3):
67
+ Number of channels in the input images.
68
+ image_size (`int`, *optional*, defaults to 224):
69
+ The size (resolution) of each image.
70
+ patch_size (`int`, *optional*, defaults to 16):
71
+ The size (resolution) of each patch.
72
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
73
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
74
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
75
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
76
+ The epsilon used by the layer normalization layers.
77
+ attention_dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout ratio for the attention probabilities.
79
+ Example:
80
+ ```python
81
+ >>> from transformers import SiglipVisionConfig, SiglipVisionModel
82
+ >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
83
+ >>> configuration = SiglipVisionConfig()
84
+ >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
85
+ >>> model = SiglipVisionModel(configuration)
86
+ >>> # Accessing the model configuration
87
+ >>> configuration = model.config
88
+ ```"""
89
+
90
+ model_type = "siglip_vision_model"
91
+
92
+ def __init__(
93
+ self,
94
+ hidden_size=768,
95
+ intermediate_size=3072,
96
+ num_hidden_layers=12,
97
+ num_attention_heads=12,
98
+ num_channels=3,
99
+ image_size=224,
100
+ patch_size=16,
101
+ hidden_act="gelu_pytorch_tanh",
102
+ layer_norm_eps=1e-6,
103
+ attention_dropout=0.0,
104
+ **kwargs,
105
+ ):
106
+ super().__init__(**kwargs)
107
+
108
+ self.hidden_size = hidden_size
109
+ self.intermediate_size = intermediate_size
110
+ self.num_hidden_layers = num_hidden_layers
111
+ self.num_attention_heads = num_attention_heads
112
+ self.num_channels = num_channels
113
+ self.patch_size = patch_size
114
+ self.image_size = image_size
115
+ self.attention_dropout = attention_dropout
116
+ self.layer_norm_eps = layer_norm_eps
117
+ self.hidden_act = hidden_act
118
+
119
+ @classmethod
120
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
121
+ cls._set_token_in_kwargs(kwargs)
122
+
123
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
124
+
125
+ # get the vision config dict if we are loading from SiglipConfig
126
+ if config_dict.get("model_type") == "siglip":
127
+ config_dict = config_dict["vision_config"]
128
+
129
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
130
+ logger.warning(
131
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
132
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
133
+ )
134
+
135
+ return cls.from_dict(config_dict, **kwargs)
136
+
137
+
138
+ _CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
139
+
140
+ SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
141
+ "google/siglip-base-patch16-224",
142
+ # See all SigLIP models at https://huggingface.co/models?filter=siglip
143
+ ]
144
+
145
+ if is_flash_attn_2_available():
146
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
147
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
148
+
149
+
150
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
151
+ def _get_unpad_data(attention_mask):
152
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
153
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
154
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
155
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
156
+ return (
157
+ indices,
158
+ cu_seqlens,
159
+ max_seqlen_in_batch,
160
+ )
161
+
162
+
163
+ def _trunc_normal_(tensor, mean, std, a, b):
164
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
165
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
166
+ def norm_cdf(x):
167
+ # Computes standard normal cumulative distribution function
168
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
169
+
170
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
171
+ warnings.warn(
172
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
173
+ "The distribution of values may be incorrect.",
174
+ stacklevel=2,
175
+ )
176
+
177
+ # Values are generated by using a truncated uniform distribution and
178
+ # then using the inverse CDF for the normal distribution.
179
+ # Get upper and lower cdf values
180
+ l = norm_cdf((a - mean) / std)
181
+ u = norm_cdf((b - mean) / std)
182
+
183
+ # Uniformly fill tensor with values from [l, u], then translate to
184
+ # [2l-1, 2u-1].
185
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
186
+
187
+ # Use inverse cdf transform for normal distribution to get truncated
188
+ # standard normal
189
+ if tensor.dtype in [torch.float16, torch.bfloat16]:
190
+ # The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
191
+ og_dtype = tensor.dtype
192
+ tensor = tensor.to(torch.float32)
193
+ tensor.erfinv_()
194
+ tensor = tensor.to(og_dtype)
195
+ else:
196
+ tensor.erfinv_()
197
+
198
+ # Transform to proper mean, std
199
+ tensor.mul_(std * math.sqrt(2.0))
200
+ tensor.add_(mean)
201
+
202
+ # Clamp to ensure it's in the proper range
203
+ if tensor.dtype == torch.float16:
204
+ # The `clamp_` op is not (yet?) defined in float16+cpu
205
+ tensor = tensor.to(torch.float32)
206
+ tensor.clamp_(min=a, max=b)
207
+ tensor = tensor.to(torch.float16)
208
+ else:
209
+ tensor.clamp_(min=a, max=b)
210
+
211
+
212
+ def trunc_normal_tf_(
213
+ tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
214
+ ) -> torch.Tensor:
215
+ """Fills the input Tensor with values drawn from a truncated
216
+ normal distribution. The values are effectively drawn from the
217
+ normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)`
218
+ with values outside :math:`[a, b]` redrawn until they are within
219
+ the bounds. The method used for generating the random values works
220
+ best when :math:`a \\leq \text{mean} \\leq b`.
221
+ NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
222
+ bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
223
+ and the result is subsequently scaled and shifted by the mean and std args.
224
+ Args:
225
+ tensor: an n-dimensional `torch.Tensor`
226
+ mean: the mean of the normal distribution
227
+ std: the standard deviation of the normal distribution
228
+ a: the minimum cutoff value
229
+ b: the maximum cutoff value
230
+ """
231
+ with torch.no_grad():
232
+ _trunc_normal_(tensor, 0, 1.0, a, b)
233
+ tensor.mul_(std).add_(mean)
234
+
235
+
236
+ def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
237
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
238
+ if mode == "fan_in":
239
+ denom = fan_in
240
+ elif mode == "fan_out":
241
+ denom = fan_out
242
+ elif mode == "fan_avg":
243
+ denom = (fan_in + fan_out) / 2
244
+
245
+ variance = scale / denom
246
+
247
+ if distribution == "truncated_normal":
248
+ # constant is stddev of standard normal truncated to (-2, 2)
249
+ trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
250
+ elif distribution == "normal":
251
+ with torch.no_grad():
252
+ tensor.normal_(std=math.sqrt(variance))
253
+ elif distribution == "uniform":
254
+ bound = math.sqrt(3 * variance)
255
+ with torch.no_grad():
256
+ tensor.uniform_(-bound, bound)
257
+ else:
258
+ raise ValueError(f"invalid distribution {distribution}")
259
+
260
+
261
+ def lecun_normal_(tensor):
262
+ variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
263
+
264
+
265
+ def default_flax_embed_init(tensor):
266
+ variance_scaling_(tensor, mode="fan_in", distribution="normal")
267
+
268
+
269
+ @dataclass
270
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
271
+ class SiglipVisionModelOutput(ModelOutput):
272
+ """
273
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
274
+ Args:
275
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
276
+ The image embeddings obtained by applying the projection layer to the pooler_output.
277
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
278
+ Sequence of hidden-states at the output of the last layer of the model.
279
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
280
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
281
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
282
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
283
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
284
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
285
+ sequence_length)`.
286
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
287
+ heads.
288
+ """
289
+
290
+ image_embeds: Optional[torch.FloatTensor] = None
291
+ last_hidden_state: torch.FloatTensor = None
292
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
293
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
294
+
295
+
296
+ class SiglipVisionEmbeddings(nn.Module):
297
+ def __init__(self, config: SiglipVisionConfig):
298
+ super().__init__()
299
+ self.config = config
300
+ self.embed_dim = config.hidden_size
301
+ self.image_size = config.image_size
302
+ self.patch_size = config.patch_size
303
+
304
+ self.patch_embedding = nn.Conv2d(
305
+ in_channels=config.num_channels,
306
+ out_channels=self.embed_dim,
307
+ kernel_size=self.patch_size,
308
+ stride=self.patch_size,
309
+ padding="valid",
310
+ )
311
+
312
+ self.num_patches_per_side = self.image_size // self.patch_size
313
+ self.num_patches = self.num_patches_per_side**2
314
+ self.num_positions = self.num_patches
315
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
316
+
317
+ def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor, tgt_sizes: Optional[torch.IntTensor]=None) -> torch.Tensor:
318
+ batch_size = pixel_values.size(0)
319
+
320
+ patch_embeds = self.patch_embedding(pixel_values)
321
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
322
+
323
+ max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
324
+ max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
325
+ boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
326
+ position_ids = torch.full(
327
+ size=(
328
+ batch_size,
329
+ max_nb_patches_h * max_nb_patches_w,
330
+ ),
331
+ fill_value=0,
332
+ )
333
+
334
+ for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
335
+ if tgt_sizes is not None:
336
+ nb_patches_h = tgt_sizes[batch_idx][0]
337
+ nb_patches_w = tgt_sizes[batch_idx][1]
338
+ else:
339
+ nb_patches_h = p_attn_mask[:, 0].sum()
340
+ nb_patches_w = p_attn_mask[0].sum()
341
+
342
+ fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
343
+ fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
344
+
345
+ bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
346
+ bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
347
+
348
+ pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
349
+ position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
350
+
351
+ position_ids = position_ids.to(self.position_embedding.weight.device)
352
+
353
+ embeddings = embeddings + self.position_embedding(position_ids)
354
+ return embeddings
355
+
356
+
357
+ class SiglipAttention(nn.Module):
358
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
359
+
360
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
361
+ def __init__(self, config):
362
+ super().__init__()
363
+ self.config = config
364
+ self.embed_dim = config.hidden_size
365
+ self.num_heads = config.num_attention_heads
366
+ self.head_dim = self.embed_dim // self.num_heads
367
+ if self.head_dim * self.num_heads != self.embed_dim:
368
+ raise ValueError(
369
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
370
+ f" {self.num_heads})."
371
+ )
372
+ self.scale = self.head_dim**-0.5
373
+ self.dropout = config.attention_dropout
374
+
375
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
376
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
377
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
378
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
379
+
380
+ def forward(
381
+ self,
382
+ hidden_states: torch.Tensor,
383
+ attention_mask: Optional[torch.Tensor] = None,
384
+ output_attentions: Optional[bool] = False,
385
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
386
+ """Input shape: Batch x Time x Channel"""
387
+
388
+ batch_size, q_len, _ = hidden_states.size()
389
+
390
+ query_states = self.q_proj(hidden_states)
391
+ key_states = self.k_proj(hidden_states)
392
+ value_states = self.v_proj(hidden_states)
393
+
394
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
395
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
396
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
397
+
398
+ k_v_seq_len = key_states.shape[-2]
399
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
400
+
401
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
402
+ raise ValueError(
403
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
404
+ f" {attn_weights.size()}"
405
+ )
406
+
407
+ if attention_mask is not None:
408
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
409
+ raise ValueError(
410
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
411
+ )
412
+ attn_weights = attn_weights + attention_mask
413
+
414
+ # upcast attention to fp32
415
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
416
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
417
+ attn_output = torch.matmul(attn_weights, value_states)
418
+
419
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
420
+ raise ValueError(
421
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
422
+ f" {attn_output.size()}"
423
+ )
424
+
425
+ attn_output = attn_output.transpose(1, 2).contiguous()
426
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
427
+
428
+ attn_output = self.out_proj(attn_output)
429
+
430
+ return attn_output, attn_weights
431
+
432
+
433
+ class SiglipFlashAttention2(SiglipAttention):
434
+ """
435
+ Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays
436
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
437
+ flash attention and deal with padding tokens in case the input contains any of them.
438
+ """
439
+
440
+ def __init__(self, *args, **kwargs):
441
+ super().__init__(*args, **kwargs)
442
+ self.is_causal = False # Hack to make sure we don't use a causal mask
443
+
444
+ def forward(
445
+ self,
446
+ hidden_states: torch.Tensor,
447
+ attention_mask: Optional[torch.LongTensor] = None,
448
+ position_ids: Optional[torch.LongTensor] = None,
449
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
450
+ output_attentions: bool = False,
451
+ use_cache: bool = False,
452
+ **kwargs,
453
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
454
+ output_attentions = False
455
+
456
+ bsz, q_len, _ = hidden_states.size()
457
+
458
+ query_states = self.q_proj(hidden_states)
459
+ key_states = self.k_proj(hidden_states)
460
+ value_states = self.v_proj(hidden_states)
461
+
462
+ # Flash attention requires the input to have the shape
463
+ # batch_size x seq_length x head_dim x hidden_dim
464
+ # therefore we just need to keep the original shape
465
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
466
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
467
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
468
+
469
+ kv_seq_len = key_states.shape[-2]
470
+ if past_key_value is not None:
471
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
472
+ # cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
473
+ # query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
474
+
475
+ # if past_key_value is not None:
476
+ # cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
477
+ # key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
478
+
479
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
480
+ # to be able to avoid many of these transpose/reshape/view.
481
+ query_states = query_states.transpose(1, 2)
482
+ key_states = key_states.transpose(1, 2)
483
+ value_states = value_states.transpose(1, 2)
484
+
485
+ dropout_rate = self.dropout if self.training else 0.0
486
+
487
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
488
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
489
+ # cast them back in the correct dtype just to be sure everything works as expected.
490
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
491
+ # in fp32. (LlamaRMSNorm handles it correctly)
492
+
493
+ input_dtype = query_states.dtype
494
+ if input_dtype == torch.float32:
495
+ if torch.is_autocast_enabled():
496
+ target_dtype = torch.get_autocast_gpu_dtype()
497
+ # Handle the case where the model is quantized
498
+ elif hasattr(self.config, "_pre_quantization_dtype"):
499
+ target_dtype = self.config._pre_quantization_dtype
500
+ else:
501
+ target_dtype = self.q_proj.weight.dtype
502
+
503
+ logger.warning_once(
504
+ "The input hidden states seem to be silently cast to float32; this might be related to the fact"
505
+ " you have upcast embedding or layer norm layers to float32. We will cast the input back to"
506
+ f" {target_dtype}."
507
+ )
508
+
509
+ query_states = query_states.to(target_dtype)
510
+ key_states = key_states.to(target_dtype)
511
+ value_states = value_states.to(target_dtype)
512
+
513
+ attn_output = self._flash_attention_forward(
514
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
515
+ )
516
+
517
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
518
+ attn_output = self.out_proj(attn_output)
519
+
520
+ if not output_attentions:
521
+ attn_weights = None
522
+
523
+ return attn_output, attn_weights
524
+
525
+ def _flash_attention_forward(
526
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
527
+ ):
528
+ """
529
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
530
+ first unpad the input, then computes the attention scores and pad the final attention scores.
531
+ Args:
532
+ query_states (`torch.Tensor`):
533
+ Input query states to be passed to Flash Attention API
534
+ key_states (`torch.Tensor`):
535
+ Input key states to be passed to Flash Attention API
536
+ value_states (`torch.Tensor`):
537
+ Input value states to be passed to Flash Attention API
538
+ attention_mask (`torch.Tensor`):
539
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
540
+ position of padding tokens and 1 for the position of non-padding tokens.
541
+ dropout (`int`, *optional*):
542
+ Attention dropout
543
+ softmax_scale (`float`, *optional*):
544
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
545
+ """
546
+
547
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
548
+ causal = self.is_causal and query_length != 1
549
+
550
+ # Contains at least one padding token in the sequence
551
+ if attention_mask is not None:
552
+ batch_size = query_states.shape[0]
553
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
554
+ query_states, key_states, value_states, attention_mask, query_length
555
+ )
556
+
557
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
558
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
559
+
560
+ attn_output_unpad = flash_attn_varlen_func(
561
+ query_states,
562
+ key_states,
563
+ value_states,
564
+ cu_seqlens_q=cu_seqlens_q,
565
+ cu_seqlens_k=cu_seqlens_k,
566
+ max_seqlen_q=max_seqlen_in_batch_q,
567
+ max_seqlen_k=max_seqlen_in_batch_k,
568
+ dropout_p=dropout,
569
+ softmax_scale=softmax_scale,
570
+ causal=causal,
571
+ )
572
+
573
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
574
+ else:
575
+ attn_output = flash_attn_func(
576
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
577
+ )
578
+
579
+ return attn_output
580
+
581
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
582
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
583
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
584
+
585
+ key_layer = index_first_axis(
586
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
587
+ )
588
+ value_layer = index_first_axis(
589
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
590
+ )
591
+ if query_length == kv_seq_len:
592
+ query_layer = index_first_axis(
593
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
594
+ )
595
+ cu_seqlens_q = cu_seqlens_k
596
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
597
+ indices_q = indices_k
598
+ elif query_length == 1:
599
+ max_seqlen_in_batch_q = 1
600
+ cu_seqlens_q = torch.arange(
601
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
602
+ ) # There is a memcpy here, that is very bad.
603
+ indices_q = cu_seqlens_q[:-1]
604
+ query_layer = query_layer.squeeze(1)
605
+ else:
606
+ # The -q_len: slice assumes left padding.
607
+ attention_mask = attention_mask[:, -query_length:]
608
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
609
+
610
+ return (
611
+ query_layer,
612
+ key_layer,
613
+ value_layer,
614
+ indices_q,
615
+ (cu_seqlens_q, cu_seqlens_k),
616
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
617
+ )
618
+
619
+
620
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
621
+ class SiglipMLP(nn.Module):
622
+ def __init__(self, config):
623
+ super().__init__()
624
+ self.config = config
625
+ self.activation_fn = ACT2FN[config.hidden_act]
626
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
627
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
628
+
629
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
630
+ hidden_states = self.fc1(hidden_states)
631
+ hidden_states = self.activation_fn(hidden_states)
632
+ hidden_states = self.fc2(hidden_states)
633
+ return hidden_states
634
+
635
+
636
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
637
+ class SiglipEncoderLayer(nn.Module):
638
+ def __init__(self, config: SiglipVisionConfig):
639
+ super().__init__()
640
+ self.embed_dim = config.hidden_size
641
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
642
+ self.self_attn = (
643
+ SiglipAttention(config)
644
+ if not self._use_flash_attention_2
645
+ else SiglipFlashAttention2(config)
646
+ )
647
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
648
+ self.mlp = SiglipMLP(config)
649
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
650
+
651
+ def forward(
652
+ self,
653
+ hidden_states: torch.Tensor,
654
+ attention_mask: torch.Tensor,
655
+ output_attentions: Optional[bool] = False,
656
+ ) -> Tuple[torch.FloatTensor]:
657
+ """
658
+ Args:
659
+ hidden_states (`torch.FloatTensor`):
660
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
661
+ attention_mask (`torch.FloatTensor`):
662
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
663
+ output_attentions (`bool`, *optional*, defaults to `False`):
664
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
665
+ returned tensors for more detail.
666
+ """
667
+ residual = hidden_states
668
+
669
+ hidden_states = self.layer_norm1(hidden_states)
670
+ hidden_states, attn_weights = self.self_attn(
671
+ hidden_states=hidden_states,
672
+ attention_mask=attention_mask,
673
+ output_attentions=output_attentions,
674
+ )
675
+ hidden_states = residual + hidden_states
676
+
677
+ residual = hidden_states
678
+ hidden_states = self.layer_norm2(hidden_states)
679
+ hidden_states = self.mlp(hidden_states)
680
+ hidden_states = residual + hidden_states
681
+
682
+ outputs = (hidden_states,)
683
+
684
+ if output_attentions:
685
+ outputs += (attn_weights,)
686
+
687
+ return outputs
688
+
689
+
690
+ class SiglipPreTrainedModel(PreTrainedModel):
691
+ """
692
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
693
+ models.
694
+ """
695
+
696
+ config_class = SiglipVisionConfig
697
+ base_model_prefix = "siglip"
698
+ supports_gradient_checkpointing = True
699
+
700
+ def _init_weights(self, module):
701
+ """Initialize the weights"""
702
+
703
+ if isinstance(module, SiglipVisionEmbeddings):
704
+ width = self.config.hidden_size
705
+ nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
706
+ elif isinstance(module, nn.Embedding):
707
+ default_flax_embed_init(module.weight)
708
+ elif isinstance(module, SiglipAttention):
709
+ nn.init.normal_(module.q_proj.weight)
710
+ nn.init.normal_(module.k_proj.weight)
711
+ nn.init.normal_(module.v_proj.weight)
712
+ nn.init.normal_(module.out_proj.weight)
713
+ nn.init.zeros_(module.q_proj.bias)
714
+ nn.init.zeros_(module.k_proj.bias)
715
+ nn.init.zeros_(module.v_proj.bias)
716
+ nn.init.zeros_(module.out_proj.bias)
717
+ elif isinstance(module, SiglipMLP):
718
+ nn.init.normal_(module.fc1.weight)
719
+ nn.init.normal_(module.fc2.weight)
720
+ nn.init.normal_(module.fc1.bias, std=1e-6)
721
+ nn.init.normal_(module.fc2.bias, std=1e-6)
722
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
723
+ lecun_normal_(module.weight)
724
+ if module.bias is not None:
725
+ nn.init.zeros_(module.bias)
726
+ elif isinstance(module, nn.LayerNorm):
727
+ module.bias.data.zero_()
728
+ module.weight.data.fill_(1.0)
729
+
730
+
731
+ SIGLIP_START_DOCSTRING = r"""
732
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
733
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
734
+ etc.)
735
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
736
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
737
+ and behavior.
738
+ Parameters:
739
+ config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
740
+ Initializing with a config file does not load the weights associated with the model, only the
741
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
742
+ """
743
+
744
+
745
+ SIGLIP_VISION_INPUTS_DOCSTRING = r"""
746
+ Args:
747
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
748
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
749
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
750
+ output_attentions (`bool`, *optional*):
751
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
752
+ tensors for more detail.
753
+ output_hidden_states (`bool`, *optional*):
754
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
755
+ more detail.
756
+ return_dict (`bool`, *optional*):
757
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
758
+ """
759
+
760
+
761
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
762
+ class SiglipEncoder(nn.Module):
763
+ """
764
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
765
+ [`SiglipEncoderLayer`].
766
+ Args:
767
+ config: SiglipConfig
768
+ """
769
+
770
+ def __init__(self, config: SiglipVisionConfig):
771
+ super().__init__()
772
+ self.config = config
773
+ self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
774
+ self.gradient_checkpointing = False
775
+
776
+ # Ignore copy
777
+ def forward(
778
+ self,
779
+ inputs_embeds,
780
+ attention_mask: Optional[torch.Tensor] = None,
781
+ output_attentions: Optional[bool] = None,
782
+ output_hidden_states: Optional[bool] = None,
783
+ return_dict: Optional[bool] = None,
784
+ ) -> Union[Tuple, BaseModelOutput]:
785
+ r"""
786
+ Args:
787
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
788
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
789
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
790
+ than the model's internal embedding lookup matrix.
791
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
792
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
793
+ - 1 for tokens that are **not masked**,
794
+ - 0 for tokens that are **masked**.
795
+ [What are attention masks?](../glossary#attention-mask)
796
+ output_attentions (`bool`, *optional*):
797
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
798
+ returned tensors for more detail.
799
+ output_hidden_states (`bool`, *optional*):
800
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
801
+ for more detail.
802
+ return_dict (`bool`, *optional*):
803
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
804
+ """
805
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
806
+ output_hidden_states = (
807
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
808
+ )
809
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
810
+
811
+ encoder_states = () if output_hidden_states else None
812
+ all_attentions = () if output_attentions else None
813
+
814
+ hidden_states = inputs_embeds
815
+ for encoder_layer in self.layers:
816
+ if output_hidden_states:
817
+ encoder_states = encoder_states + (hidden_states,)
818
+ if self.gradient_checkpointing and self.training:
819
+ layer_outputs = self._gradient_checkpointing_func(
820
+ encoder_layer.__call__,
821
+ hidden_states,
822
+ attention_mask,
823
+ output_attentions,
824
+ )
825
+ else:
826
+ layer_outputs = encoder_layer(
827
+ hidden_states,
828
+ attention_mask,
829
+ output_attentions=output_attentions,
830
+ )
831
+
832
+ hidden_states = layer_outputs[0]
833
+
834
+ if output_attentions:
835
+ all_attentions = all_attentions + (layer_outputs[1],)
836
+
837
+ if output_hidden_states:
838
+ encoder_states = encoder_states + (hidden_states,)
839
+
840
+ if not return_dict:
841
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
842
+ return BaseModelOutput(
843
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
844
+ )
845
+
846
+ @add_start_docstrings(
847
+ """The vision model from SigLIP without any head or projection on top.""",
848
+ SIGLIP_START_DOCSTRING
849
+ )
850
+ class SiglipVisionTransformer(SiglipPreTrainedModel):
851
+ config_class = SiglipVisionConfig
852
+ main_input_name = "pixel_values"
853
+ _supports_flash_attn_2 = True
854
+
855
+ def __init__(self, config: SiglipVisionConfig):
856
+ super().__init__(config)
857
+ self.config = config
858
+ embed_dim = config.hidden_size
859
+
860
+ self.embeddings = SiglipVisionEmbeddings(config)
861
+ self.encoder = SiglipEncoder(config)
862
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
863
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
864
+
865
+ # Initialize weights and apply final processing
866
+ self.post_init()
867
+
868
+ def get_input_embeddings(self) -> nn.Module:
869
+ return self.embeddings.patch_embedding
870
+
871
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
872
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
873
+ def forward(
874
+ self,
875
+ pixel_values,
876
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
877
+ tgt_sizes: Optional[torch.IntTensor] = None,
878
+ output_attentions: Optional[bool] = None,
879
+ output_hidden_states: Optional[bool] = None,
880
+ return_dict: Optional[bool] = None,
881
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
882
+ r"""
883
+ Returns:
884
+ """
885
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
886
+ output_hidden_states = (
887
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
888
+ )
889
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
890
+
891
+ batch_size = pixel_values.size(0)
892
+ if patch_attention_mask is None:
893
+ patch_attention_mask = torch.ones(
894
+ size=(
895
+ batch_size,
896
+ pixel_values.size(2) // self.config.patch_size,
897
+ pixel_values.size(3) // self.config.patch_size,
898
+ ),
899
+ dtype=torch.bool,
900
+ device=pixel_values.device,
901
+ )
902
+
903
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes)
904
+
905
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
906
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive,
907
+ # so when `patch_attention_mask` is all ones (i.e. every patch is attended to)
908
+ # we avoid passing an attention mask, which is equivalent to attending to the full sequence.
909
+ if not torch.any(~patch_attention_mask):
910
+ attention_mask = None
911
+ else:
912
+ attention_mask = (
913
+ _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
914
+ if not self._use_flash_attention_2
915
+ else patch_attention_mask
916
+ )
917
+
918
+ encoder_outputs = self.encoder(
919
+ inputs_embeds=hidden_states,
920
+ attention_mask=attention_mask,
921
+ output_attentions=output_attentions,
922
+ output_hidden_states=output_hidden_states,
923
+ return_dict=return_dict,
924
+ )
925
+
926
+ last_hidden_state = encoder_outputs[0]
927
+ last_hidden_state = self.post_layernorm(last_hidden_state)
928
+
929
+ if not return_dict:
930
+ return (last_hidden_state, None) + encoder_outputs[1:]
931
+
932
+ return BaseModelOutputWithPooling(
933
+ last_hidden_state=last_hidden_state,
934
+ pooler_output=None,
935
+ hidden_states=encoder_outputs.hidden_states,
936
+ attentions=encoder_outputs.attentions,
937
+ )
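The vision transformer above takes images of mixed resolutions that have been padded onto one canvas, together with a boolean patch mask and per-image patch-grid sizes. Below is a minimal sketch of assembling those inputs; the patch size, resolutions, and the commented-out model construction are illustrative assumptions, not values read from this checkpoint.

import torch

patch_size = 14                                        # assumed placeholder value
images = [torch.randn(3, 448, 448), torch.randn(3, 448, 224)]
tgt_sizes = [(img.shape[1] // patch_size, img.shape[2] // patch_size) for img in images]

max_h = max(img.shape[1] for img in images)
max_w = max(img.shape[2] for img in images)
pixel_values = torch.zeros(len(images), 3, max_h, max_w)
patch_attention_mask = torch.zeros(
    len(images), max_h // patch_size, max_w // patch_size, dtype=torch.bool
)
for i, (img, (gh, gw)) in enumerate(zip(images, tgt_sizes)):
    pixel_values[i, :, : img.shape[1], : img.shape[2]] = img   # zero-pad to the canvas
    patch_attention_mask[i, :gh, :gw] = True                   # only real patches attend

tgt_sizes = torch.tensor(tgt_sizes, dtype=torch.int32)
# vision_tower = SiglipVisionTransformer(config)   # config would come from the checkpoint
# out = vision_tower(pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes)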
checkpoint-5000/resampler.py ADDED
@@ -0,0 +1,782 @@
1
+ from functools import partial
2
+ from typing import Optional, Tuple
3
+ import numpy as np
4
+ import warnings
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch import Tensor
9
+ import torch.nn.functional as F
10
+ from torch.nn.functional import *
11
+ from torch.nn.modules.activation import *
12
+ from torch.nn.init import trunc_normal_, constant_, xavier_normal_, xavier_uniform_
13
+
14
+ from transformers.integrations import is_deepspeed_zero3_enabled
15
+
16
+ def get_2d_sincos_pos_embed(embed_dim, image_size):
17
+ """
18
+ image_size: int or (image_height, image_width) tuple
19
+ return:
20
+ pos_embed: [image_height, image_width, embed_dim]
21
+ """
22
+ if isinstance(image_size, int):
23
+ grid_h_size, grid_w_size = image_size, image_size
24
+ else:
25
+ grid_h_size, grid_w_size = image_size[0], image_size[1]
26
+
27
+ grid_h = np.arange(grid_h_size, dtype=np.float32)
28
+ grid_w = np.arange(grid_w_size, dtype=np.float32)
29
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
30
+ grid = np.stack(grid, axis=0)
31
+
32
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
33
+ return pos_embed
34
+
35
+
36
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
37
+ assert embed_dim % 2 == 0
38
+
39
+ # use half of dimensions to encode grid_h
40
+ emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0]) # (H, W, D/2)
41
+ emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1]) # (H, W, D/2)
42
+
43
+ emb = np.concatenate([emb_h, emb_w], axis=-1) # (H, W, D)
44
+ return emb
45
+
46
+
47
+ def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
48
+ """
49
+ embed_dim: output dimension for each position
50
+ pos: a list of positions to be encoded: size (H, W)
51
+ out: (H, W, D)
52
+ """
53
+ assert embed_dim % 2 == 0
54
+ omega = np.arange(embed_dim // 2, dtype=np.float32)
55
+ omega /= embed_dim / 2.
56
+ omega = 1. / 10000 ** omega # (D/2,)
57
+
58
+ out = np.einsum('hw,d->hwd', pos, omega) # (H, W, D/2), outer product
59
+
60
+ emb_sin = np.sin(out) # (H, W, D/2)
61
+ emb_cos = np.cos(out) # (H, W, D/2)
62
+
63
+ emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (H, W, D)
64
+ return emb
65
+
66
+
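A quick sanity check of the helpers above (assuming `resampler.py` is importable from the checkpoint directory): the embedding has one vector per grid position, with half of the channels devoted to each spatial axis.

from resampler import get_2d_sincos_pos_embed

pos = get_2d_sincos_pos_embed(embed_dim=8, image_size=(4, 6))
print(pos.shape)   # (4, 6, 8) -> (grid_h, grid_w, embed_dim)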
67
+ class Resampler(nn.Module):
68
+ """
69
+ A 2D perceiver-resampler network with a single cross-attention layer over
70
+ learnable queries and a 2D sincos positional embedding.
71
+ Outputs:
72
+ A tensor with the shape of (batch_size, num_queries, embed_dim)
73
+ """
74
+
75
+ def __init__(
76
+ self,
77
+ num_queries,
78
+ embed_dim,
79
+ num_heads,
80
+ kv_dim=None,
81
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
82
+ adaptive=False,
83
+ max_size=(70, 70),
84
+ ):
85
+ super().__init__()
86
+ self.num_queries = num_queries
87
+ self.embed_dim = embed_dim
88
+ self.num_heads = num_heads
89
+ self.adaptive = adaptive
90
+ self.max_size = max_size
91
+
92
+ self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
93
+
94
+ if kv_dim is not None and kv_dim != embed_dim:
95
+ self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
96
+ else:
97
+ self.kv_proj = nn.Identity()
98
+
99
+ self.attn = MultiheadAttention(embed_dim, num_heads)
100
+ self.ln_q = norm_layer(embed_dim)
101
+ self.ln_kv = norm_layer(embed_dim)
102
+
103
+ self.ln_post = norm_layer(embed_dim)
104
+ self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
105
+
106
+ self._set_2d_pos_cache(self.max_size)
107
+
108
+ def _set_2d_pos_cache(self, max_size, device='cpu'):
109
+ if is_deepspeed_zero3_enabled():
110
+ device='cuda'
111
+ pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
112
+ self.register_buffer("pos_embed", pos_embed, persistent=False)
113
+
114
+ def _adjust_pos_cache(self, tgt_sizes, device):
115
+ max_h = torch.max(tgt_sizes[:, 0])
116
+ max_w = torch.max(tgt_sizes[:, 1])
117
+ if max_h > self.max_size[0] or max_w > self.max_size[1]:
118
+ self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
119
+ self._set_2d_pos_cache(self.max_size, device)
120
+
121
+ def _init_weights(self, m):
122
+ if isinstance(m, nn.Linear):
123
+ trunc_normal_(m.weight, std=.02)
124
+ if isinstance(m, nn.Linear) and m.bias is not None:
125
+ nn.init.constant_(m.bias, 0)
126
+ elif isinstance(m, nn.LayerNorm):
127
+ nn.init.constant_(m.bias, 0)
128
+ nn.init.constant_(m.weight, 1.0)
129
+
130
+ def forward(self, x, tgt_sizes=None):
131
+ assert x.shape[0] == tgt_sizes.shape[0]
132
+ bs = x.shape[0]
133
+
134
+ device = x.device
135
+ dtype = x.dtype
136
+
137
+ patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]
138
+
139
+ self._adjust_pos_cache(tgt_sizes, device=device)
140
+
141
+ max_patch_len = torch.max(patch_len)
142
+ key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)
143
+
144
+ pos_embed = []
145
+ for i in range(bs):
146
+ tgt_h, tgt_w = tgt_sizes[i]
147
+ pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype)) # patches * D
148
+ key_padding_mask[i, patch_len[i]:] = True
149
+
150
+ pos_embed = torch.nn.utils.rnn.pad_sequence(
151
+ pos_embed, batch_first=True, padding_value=0.0).permute(1, 0, 2) # BLD => L * B * D
152
+
153
+ x = self.kv_proj(x) # B * L * D
154
+ x = self.ln_kv(x).permute(1, 0, 2) # L * B * D
155
+
156
+ q = self.ln_q(self.query) # Q * D
157
+
158
+ out = self.attn(
159
+ self._repeat(q, bs), # Q * B * D
160
+ x + pos_embed, # L * B * D + L * B * D
161
+ x,
162
+ key_padding_mask=key_padding_mask)[0]
163
+ # out: Q * B * D
164
+ x = out.permute(1, 0, 2) # B * Q * D
165
+
166
+ x = self.ln_post(x)
167
+ x = x @ self.proj
168
+ return x
169
+
170
+ def _repeat(self, query, N: int):
171
+ return query.unsqueeze(1).repeat(1, N, 1)
172
+
173
+
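A minimal sketch of exercising the Resampler above: a batch of zero-padded patch features with per-image grid sizes is compressed into a fixed number of query tokens. The dimensions are illustrative stand-ins, not the checkpoint's real hidden sizes, and the import assumes the checkpoint directory is on `sys.path`.

import torch
from resampler import Resampler

resampler = Resampler(num_queries=64, embed_dim=256, num_heads=8, kv_dim=64)

tgt_sizes = torch.tensor([[24, 24], [24, 12]], dtype=torch.int32)   # patch grids (H, W)
max_len = int((tgt_sizes[:, 0] * tgt_sizes[:, 1]).max())            # longest patch sequence
x = torch.randn(2, max_len, 64)    # zero-padded patch features from the vision tower
out = resampler(x, tgt_sizes)      # -> (2, 64, 256): fixed-length visual tokens per image
print(out.shape)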
174
+ class MultiheadAttention(nn.MultiheadAttention):
175
+ def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
176
+ add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
177
+ super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)
178
+
179
+ # rewrite out_proj layer,with nn.Linear
180
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
181
+
182
+ def forward(
183
+ self,
184
+ query: Tensor,
185
+ key: Tensor,
186
+ value: Tensor,
187
+ key_padding_mask: Optional[Tensor] = None,
188
+ need_weights: bool = True,
189
+ attn_mask: Optional[Tensor] = None,
190
+ average_attn_weights: bool = True,
191
+ is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
192
+ why_not_fast_path = ''
193
+ if ((attn_mask is not None and torch.is_floating_point(attn_mask))
194
+ or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
195
+ why_not_fast_path = "floating-point masks are not supported for fast path."
196
+
197
+ is_batched = query.dim() == 3
198
+
199
+ key_padding_mask = _canonical_mask(
200
+ mask=key_padding_mask,
201
+ mask_name="key_padding_mask",
202
+ other_type=F._none_or_dtype(attn_mask),
203
+ other_name="attn_mask",
204
+ target_type=query.dtype
205
+ )
206
+
207
+ attn_mask = _canonical_mask(
208
+ mask=attn_mask,
209
+ mask_name="attn_mask",
210
+ other_type=None,
211
+ other_name="",
212
+ target_type=query.dtype,
213
+ check_other=False,
214
+ )
215
+
216
+
217
+ if not is_batched:
218
+ why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
219
+ elif query is not key or key is not value:
220
+ # When lifting this restriction, don't forget to either
221
+ # enforce that the dtypes all match or test cases where
222
+ # they don't!
223
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
224
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
225
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
226
+ elif self.in_proj_weight is None:
227
+ why_not_fast_path = "in_proj_weight was None"
228
+ elif query.dtype != self.in_proj_weight.dtype:
229
+ # this case will fail anyway, but at least they'll get a useful error message.
230
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
231
+ elif self.training:
232
+ why_not_fast_path = "training is enabled"
233
+ elif (self.num_heads % 2) != 0:
234
+ why_not_fast_path = "self.num_heads is not even"
235
+ elif not self.batch_first:
236
+ why_not_fast_path = "batch_first was not True"
237
+ elif self.bias_k is not None:
238
+ why_not_fast_path = "self.bias_k was not None"
239
+ elif self.bias_v is not None:
240
+ why_not_fast_path = "self.bias_v was not None"
241
+ elif self.add_zero_attn:
242
+ why_not_fast_path = "add_zero_attn was enabled"
243
+ elif not self._qkv_same_embed_dim:
244
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
245
+ elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
246
+ why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
247
+ is not supported with NestedTensor input"
248
+ elif torch.is_autocast_enabled():
249
+ why_not_fast_path = "autocast is enabled"
250
+
251
+ if not why_not_fast_path:
252
+ tensor_args = (
253
+ query,
254
+ key,
255
+ value,
256
+ self.in_proj_weight,
257
+ self.in_proj_bias,
258
+ self.out_proj.weight,
259
+ self.out_proj.bias,
260
+ )
261
+ # We have to use list comprehensions below because TorchScript does not support
262
+ # generator expressions.
263
+ if torch.overrides.has_torch_function(tensor_args):
264
+ why_not_fast_path = "some Tensor argument has_torch_function"
265
+ elif _is_make_fx_tracing():
266
+ why_not_fast_path = "we are running make_fx tracing"
267
+ elif not all(_check_arg_device(x) for x in tensor_args):
268
+ why_not_fast_path = ("some Tensor argument's device is neither one of "
269
+ f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
270
+ elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
271
+ why_not_fast_path = ("grad is enabled and at least one of query or the "
272
+ "input/output projection weights or biases requires_grad")
273
+ if not why_not_fast_path:
274
+ merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
275
+
276
+ if self.in_proj_bias is not None and self.in_proj_weight is not None:
277
+ return torch._native_multi_head_attention(
278
+ query,
279
+ key,
280
+ value,
281
+ self.embed_dim,
282
+ self.num_heads,
283
+ self.in_proj_weight,
284
+ self.in_proj_bias,
285
+ self.out_proj.weight,
286
+ self.out_proj.bias,
287
+ merged_mask,
288
+ need_weights,
289
+ average_attn_weights,
290
+ mask_type)
291
+
292
+ any_nested = query.is_nested or key.is_nested or value.is_nested
293
+ assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
294
+ f"The fast path was not hit because {why_not_fast_path}")
295
+
296
+ if self.batch_first and is_batched:
297
+ # make sure that the transpose op does not affect the "is" property
298
+ if key is value:
299
+ if query is key:
300
+ query = key = value = query.transpose(1, 0)
301
+ else:
302
+ query, key = (x.transpose(1, 0) for x in (query, key))
303
+ value = key
304
+ else:
305
+ query, key, value = (x.transpose(1, 0) for x in (query, key, value))
306
+
307
+ if not self._qkv_same_embed_dim:
308
+ attn_output, attn_output_weights = self.multi_head_attention_forward(
309
+ query, key, value, self.embed_dim, self.num_heads,
310
+ self.in_proj_weight, self.in_proj_bias,
311
+ self.bias_k, self.bias_v, self.add_zero_attn,
312
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
313
+ training=self.training,
314
+ key_padding_mask=key_padding_mask, need_weights=need_weights,
315
+ attn_mask=attn_mask,
316
+ use_separate_proj_weight=True,
317
+ q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
318
+ v_proj_weight=self.v_proj_weight,
319
+ average_attn_weights=average_attn_weights,
320
+ is_causal=is_causal)
321
+ else:
322
+ attn_output, attn_output_weights = self.multi_head_attention_forward(
323
+ query, key, value, self.embed_dim, self.num_heads,
324
+ self.in_proj_weight, self.in_proj_bias,
325
+ self.bias_k, self.bias_v, self.add_zero_attn,
326
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
327
+ training=self.training,
328
+ key_padding_mask=key_padding_mask,
329
+ need_weights=need_weights,
330
+ attn_mask=attn_mask,
331
+ average_attn_weights=average_attn_weights,
332
+ is_causal=is_causal)
333
+ if self.batch_first and is_batched:
334
+ return attn_output.transpose(1, 0), attn_output_weights
335
+ else:
336
+ return attn_output, attn_output_weights
337
+
338
+ def multi_head_attention_forward(
339
+ self,
340
+ query: Tensor,
341
+ key: Tensor,
342
+ value: Tensor,
343
+ embed_dim_to_check: int,
344
+ num_heads: int,
345
+ in_proj_weight: Optional[Tensor],
346
+ in_proj_bias: Optional[Tensor],
347
+ bias_k: Optional[Tensor],
348
+ bias_v: Optional[Tensor],
349
+ add_zero_attn: bool,
350
+ dropout_p: float,
351
+ out_proj_weight: Tensor,
352
+ out_proj_bias: Optional[Tensor],
353
+ training: bool = True,
354
+ key_padding_mask: Optional[Tensor] = None,
355
+ need_weights: bool = True,
356
+ attn_mask: Optional[Tensor] = None,
357
+ use_separate_proj_weight: bool = False,
358
+ q_proj_weight: Optional[Tensor] = None,
359
+ k_proj_weight: Optional[Tensor] = None,
360
+ v_proj_weight: Optional[Tensor] = None,
361
+ static_k: Optional[Tensor] = None,
362
+ static_v: Optional[Tensor] = None,
363
+ average_attn_weights: bool = True,
364
+ is_causal: bool = False,
365
+ ) -> Tuple[Tensor, Optional[Tensor]]:
366
+ tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
367
+
368
+ is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
369
+
370
+ # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
371
+ # is batched, run the computation and before returning squeeze the
372
+ # batch dimension so that the output doesn't carry this temporary batch dimension.
373
+ if not is_batched:
374
+ # unsqueeze if the input is unbatched
375
+ query = query.unsqueeze(1)
376
+ key = key.unsqueeze(1)
377
+ value = value.unsqueeze(1)
378
+ if key_padding_mask is not None:
379
+ key_padding_mask = key_padding_mask.unsqueeze(0)
380
+
381
+ # set up shape vars
382
+ tgt_len, bsz, embed_dim = query.shape
383
+ src_len, _, _ = key.shape
384
+
385
+ key_padding_mask = _canonical_mask(
386
+ mask=key_padding_mask,
387
+ mask_name="key_padding_mask",
388
+ other_type=_none_or_dtype(attn_mask),
389
+ other_name="attn_mask",
390
+ target_type=query.dtype
391
+ )
392
+
393
+ if is_causal and attn_mask is None:
394
+ raise RuntimeError(
395
+ "Need attn_mask if specifying the is_causal hint. "
396
+ "You may use the Transformer module method "
397
+ "`generate_square_subsequent_mask` to create this mask."
398
+ )
399
+
400
+ if is_causal and key_padding_mask is None and not need_weights:
401
+ # When we have a key_padding_mask or need weights, we need an attn_mask;
402
+ # otherwise, we pass the is_causal hint straight through as the
403
+ # is_causal indicator to SDPA.
404
+ attn_mask = None
405
+ else:
406
+ attn_mask = _canonical_mask(
407
+ mask=attn_mask,
408
+ mask_name="attn_mask",
409
+ other_type=None,
410
+ other_name="",
411
+ target_type=query.dtype,
412
+ check_other=False,
413
+ )
414
+
415
+ if key_padding_mask is not None:
416
+ # We have the attn_mask, and use that to merge kpm into it.
417
+ # Turn off use of is_causal hint, as the merged mask is no
418
+ # longer causal.
419
+ is_causal = False
420
+
421
+ assert embed_dim == embed_dim_to_check, \
422
+ f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
423
+ if isinstance(embed_dim, torch.Tensor):
424
+ # embed_dim can be a tensor when JIT tracing
425
+ head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
426
+ else:
427
+ head_dim = embed_dim // num_heads
428
+ assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
429
+ if use_separate_proj_weight:
430
+ # allow MHA to have different embedding dimensions when separate projection weights are used
431
+ assert key.shape[:2] == value.shape[:2], \
432
+ f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
433
+ else:
434
+ assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
435
+
436
+ #
437
+ # compute in-projection
438
+ #
439
+ if not use_separate_proj_weight:
440
+ assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
441
+ q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
442
+ else:
443
+ assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
444
+ assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
445
+ assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
446
+ if in_proj_bias is None:
447
+ b_q = b_k = b_v = None
448
+ else:
449
+ b_q, b_k, b_v = in_proj_bias.chunk(3)
450
+ q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
451
+
452
+ # prep attention mask
453
+
454
+ if attn_mask is not None:
455
+ # ensure attn_mask's dim is 3
456
+ if attn_mask.dim() == 2:
457
+ correct_2d_size = (tgt_len, src_len)
458
+ if attn_mask.shape != correct_2d_size:
459
+ raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
460
+ attn_mask = attn_mask.unsqueeze(0)
461
+ elif attn_mask.dim() == 3:
462
+ correct_3d_size = (bsz * num_heads, tgt_len, src_len)
463
+ if attn_mask.shape != correct_3d_size:
464
+ raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
465
+ else:
466
+ raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
467
+
468
+ # add bias along batch dimension (currently second)
469
+ if bias_k is not None and bias_v is not None:
470
+ assert static_k is None, "bias cannot be added to static key."
471
+ assert static_v is None, "bias cannot be added to static value."
472
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
473
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
474
+ if attn_mask is not None:
475
+ attn_mask = pad(attn_mask, (0, 1))
476
+ if key_padding_mask is not None:
477
+ key_padding_mask = pad(key_padding_mask, (0, 1))
478
+ else:
479
+ assert bias_k is None
480
+ assert bias_v is None
481
+
482
+ #
483
+ # reshape q, k, v for multihead attention and make em batch first
484
+ #
485
+ q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
486
+ if static_k is None:
487
+ k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
488
+ else:
489
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
490
+ assert static_k.size(0) == bsz * num_heads, \
491
+ f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
492
+ assert static_k.size(2) == head_dim, \
493
+ f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
494
+ k = static_k
495
+ if static_v is None:
496
+ v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
497
+ else:
498
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
499
+ assert static_v.size(0) == bsz * num_heads, \
500
+ f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
501
+ assert static_v.size(2) == head_dim, \
502
+ f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
503
+ v = static_v
504
+
505
+ # add zero attention along batch dimension (now first)
506
+ if add_zero_attn:
507
+ zero_attn_shape = (bsz * num_heads, 1, head_dim)
508
+ k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
509
+ v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
510
+ if attn_mask is not None:
511
+ attn_mask = pad(attn_mask, (0, 1))
512
+ if key_padding_mask is not None:
513
+ key_padding_mask = pad(key_padding_mask, (0, 1))
514
+
515
+ # update source sequence length after adjustments
516
+ src_len = k.size(1)
517
+
518
+ # merge key padding and attention masks
519
+ if key_padding_mask is not None:
520
+ assert key_padding_mask.shape == (bsz, src_len), \
521
+ f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
522
+ key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
523
+ expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
524
+ if attn_mask is None:
525
+ attn_mask = key_padding_mask
526
+ else:
527
+ attn_mask = attn_mask + key_padding_mask
528
+
529
+ # adjust dropout probability
530
+ if not training:
531
+ dropout_p = 0.0
532
+
533
+ #
534
+ # (deep breath) calculate attention and out projection
535
+ #
536
+
537
+ if need_weights:
538
+ B, Nt, E = q.shape
539
+ q_scaled = q / math.sqrt(E)
540
+
541
+ assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
542
+
543
+ if attn_mask is not None:
544
+ attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
545
+ else:
546
+ attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
547
+ attn_output_weights = softmax(attn_output_weights, dim=-1)
548
+ if dropout_p > 0.0:
549
+ attn_output_weights = dropout(attn_output_weights, p=dropout_p)
550
+
551
+ attn_output = torch.bmm(attn_output_weights, v)
552
+
553
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
554
+ attn_output = self.out_proj(attn_output)
555
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
556
+
557
+ # optionally average attention weights over heads
558
+ attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
559
+ if average_attn_weights:
560
+ attn_output_weights = attn_output_weights.mean(dim=1)
561
+
562
+ if not is_batched:
563
+ # squeeze the output if input was unbatched
564
+ attn_output = attn_output.squeeze(1)
565
+ attn_output_weights = attn_output_weights.squeeze(0)
566
+ return attn_output, attn_output_weights
567
+ else:
568
+ # attn_mask can be either (L,S) or (N*num_heads, L, S)
569
+ # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
570
+ # in order to match the input for SDPA of (N, num_heads, L, S)
571
+ if attn_mask is not None:
572
+ if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
573
+ attn_mask = attn_mask.unsqueeze(0)
574
+ else:
575
+ attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
576
+
577
+ q = q.view(bsz, num_heads, tgt_len, head_dim)
578
+ k = k.view(bsz, num_heads, src_len, head_dim)
579
+ v = v.view(bsz, num_heads, src_len, head_dim)
580
+
581
+ attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
582
+ attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
583
+
584
+ attn_output = self.out_proj(attn_output)
585
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
586
+ if not is_batched:
587
+ # squeeze the output if input was unbatched
588
+ attn_output = attn_output.squeeze(1)
589
+ return attn_output, None
590
+
591
+
592
+ def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
593
+ key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
594
+ # Verifies the expected shape for `query`, `key`, `value`, `key_padding_mask` and `attn_mask`
595
+ # and returns if the input is batched or not.
596
+ # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
597
+
598
+ # Shape check.
599
+ if query.dim() == 3:
600
+ # Batched Inputs
601
+ is_batched = True
602
+ assert key.dim() == 3 and value.dim() == 3, \
603
+ ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
604
+ f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
605
+ if key_padding_mask is not None:
606
+ assert key_padding_mask.dim() == 2, \
607
+ ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
608
+ f" but found {key_padding_mask.dim()}-D tensor instead")
609
+ if attn_mask is not None:
610
+ assert attn_mask.dim() in (2, 3), \
611
+ ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
612
+ f" but found {attn_mask.dim()}-D tensor instead")
613
+ elif query.dim() == 2:
614
+ # Unbatched Inputs
615
+ is_batched = False
616
+ assert key.dim() == 2 and value.dim() == 2, \
617
+ ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
618
+ f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
619
+
620
+ if key_padding_mask is not None:
621
+ assert key_padding_mask.dim() == 1, \
622
+ ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
623
+ f" but found {key_padding_mask.dim()}-D tensor instead")
624
+
625
+ if attn_mask is not None:
626
+ assert attn_mask.dim() in (2, 3), \
627
+ ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
628
+ f" but found {attn_mask.dim()}-D tensor instead")
629
+ if attn_mask.dim() == 3:
630
+ expected_shape = (num_heads, query.shape[0], key.shape[0])
631
+ assert attn_mask.shape == expected_shape, \
632
+ (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
633
+ else:
634
+ raise AssertionError(
635
+ f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
636
+
637
+ return is_batched
638
+
639
+
640
+ def _canonical_mask(
641
+ mask: Optional[Tensor],
642
+ mask_name: str,
643
+ other_type: Optional[DType],
644
+ other_name: str,
645
+ target_type: DType,
646
+ check_other: bool = True,
647
+ ) -> Optional[Tensor]:
648
+
649
+ if mask is not None:
650
+ _mask_dtype = mask.dtype
651
+ _mask_is_float = torch.is_floating_point(mask)
652
+ if _mask_dtype != torch.bool and not _mask_is_float:
653
+ raise AssertionError(
654
+ f"only bool and floating types of {mask_name} are supported")
655
+ if check_other and other_type is not None:
656
+ if _mask_dtype != other_type:
657
+ warnings.warn(
658
+ f"Support for mismatched {mask_name} and {other_name} "
659
+ "is deprecated. Use same type for both instead."
660
+ )
661
+ if not _mask_is_float:
662
+ mask = (
663
+ torch.zeros_like(mask, dtype=target_type)
664
+ .masked_fill_(mask, float("-inf"))
665
+ )
666
+ return mask
667
+
668
+
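`_canonical_mask` is what turns the boolean padding masks used throughout this file into additive float masks; a tiny illustration (assuming `resampler.py` is importable):

import torch
from resampler import _canonical_mask

bool_mask = torch.tensor([[False, False, True]])   # True marks a masked-out key
float_mask = _canonical_mask(
    mask=bool_mask, mask_name="key_padding_mask",
    other_type=None, other_name="", target_type=torch.float32,
)
print(float_mask)   # tensor([[0., 0., -inf]]) -- added to attention logits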
669
+ def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
670
+ if input is None:
671
+ return None
672
+ elif isinstance(input, torch.Tensor):
673
+ return input.dtype
674
+ raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
675
+
676
+ def _in_projection_packed(
677
+ q: Tensor,
678
+ k: Tensor,
679
+ v: Tensor,
680
+ w: Tensor,
681
+ b: Optional[Tensor] = None,
682
+ ) -> List[Tensor]:
683
+ r"""
684
+ Performs the in-projection step of the attention operation, using packed weights.
685
+ Output is a triple containing projection tensors for query, key and value.
686
+ Args:
687
+ q, k, v: query, key and value tensors to be projected. For self-attention,
688
+ these are typically the same tensor; for encoder-decoder attention,
689
+ k and v are typically the same tensor. (We take advantage of these
690
+ identities for performance if they are present.) Regardless, q, k and v
691
+ must share a common embedding dimension; otherwise their shapes may vary.
692
+ w: projection weights for q, k and v, packed into a single tensor. Weights
693
+ are packed along dimension 0, in q, k, v order.
694
+ b: optional projection biases for q, k and v, packed into a single tensor
695
+ in q, k, v order.
696
+ Shape:
697
+ Inputs:
698
+ - q: :math:`(..., E)` where E is the embedding dimension
699
+ - k: :math:`(..., E)` where E is the embedding dimension
700
+ - v: :math:`(..., E)` where E is the embedding dimension
701
+ - w: :math:`(E * 3, E)` where E is the embedding dimension
702
+ - b: :math:`E * 3` where E is the embedding dimension
703
+ Output:
704
+ - in output list :math:`[q', k', v']`, each output tensor will have the
705
+ same shape as the corresponding input tensor.
706
+ """
707
+ E = q.size(-1)
708
+ if k is v:
709
+ if q is k:
710
+ # self-attention
711
+ proj = linear(q, w, b)
712
+ # reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk()
713
+ proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
714
+ return proj[0], proj[1], proj[2]
715
+ else:
716
+ # encoder-decoder attention
717
+ w_q, w_kv = w.split([E, E * 2])
718
+ if b is None:
719
+ b_q = b_kv = None
720
+ else:
721
+ b_q, b_kv = b.split([E, E * 2])
722
+ q_proj = linear(q, w_q, b_q)
723
+ kv_proj = linear(k, w_kv, b_kv)
724
+ # reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk()
725
+ kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
726
+ return (q_proj, kv_proj[0], kv_proj[1])
727
+ else:
728
+ w_q, w_k, w_v = w.chunk(3)
729
+ if b is None:
730
+ b_q = b_k = b_v = None
731
+ else:
732
+ b_q, b_k, b_v = b.chunk(3)
733
+ return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
734
+
735
+
736
+ def _in_projection(
737
+ q: Tensor,
738
+ k: Tensor,
739
+ v: Tensor,
740
+ w_q: Tensor,
741
+ w_k: Tensor,
742
+ w_v: Tensor,
743
+ b_q: Optional[Tensor] = None,
744
+ b_k: Optional[Tensor] = None,
745
+ b_v: Optional[Tensor] = None,
746
+ ) -> Tuple[Tensor, Tensor, Tensor]:
747
+ r"""
748
+ Performs the in-projection step of the attention operation. This is simply
749
+ a triple of linear projections, with shape constraints on the weights which
750
+ ensure embedding dimension uniformity in the projected outputs.
751
+ Output is a triple containing projection tensors for query, key and value.
752
+ Args:
753
+ q, k, v: query, key and value tensors to be projected.
754
+ w_q, w_k, w_v: weights for q, k and v, respectively.
755
+ b_q, b_k, b_v: optional biases for q, k and v, respectively.
756
+ Shape:
757
+ Inputs:
758
+ - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
759
+ number of leading dimensions.
760
+ - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
761
+ number of leading dimensions.
762
+ - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
763
+ number of leading dimensions.
764
+ - w_q: :math:`(Eq, Eq)`
765
+ - w_k: :math:`(Eq, Ek)`
766
+ - w_v: :math:`(Eq, Ev)`
767
+ - b_q: :math:`(Eq)`
768
+ - b_k: :math:`(Eq)`
769
+ - b_v: :math:`(Eq)`
770
+ Output: in output triple :math:`(q', k', v')`,
771
+ - q': :math:`[Qdims..., Eq]`
772
+ - k': :math:`[Kdims..., Eq]`
773
+ - v': :math:`[Vdims..., Eq]`
774
+ """
775
+ Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
776
+ assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
777
+ assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
778
+ assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
779
+ assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
780
+ assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
781
+ assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
782
+ return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
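A small self-contained check of the packed in-projection above for the self-attention case: chunking the packed weight reproduces three separate linear projections (again assuming `resampler.py` is importable from the checkpoint directory).

import torch
from torch.nn.functional import linear
from resampler import _in_projection_packed

E, L, B = 16, 5, 2
x = torch.randn(L, B, E)
w = torch.randn(3 * E, E)      # packed q/k/v projection weights
b = torch.randn(3 * E)

q, k, v = _in_projection_packed(x, x, x, w, b)   # self-attention path: q is k is v
w_q, w_k, w_v = w.chunk(3)
b_q, b_k, b_v = b.chunk(3)
assert torch.allclose(q, linear(x, w_q, b_q), atol=1e-6)
assert torch.allclose(k, linear(x, w_k, b_k), atol=1e-6)
assert torch.allclose(v, linear(x, w_v, b_v), atol=1e-6)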
checkpoint-5000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:038329a940174c7998542fe9a3c903ee0c21d0a2351959a1cb53ac9af3988f89
3
+ size 15984
checkpoint-5000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc2eb62cd1e31d2c95a28eaadd97a496b27751983378626efc3ee2a53ae743ff
3
+ size 15984
checkpoint-5000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c76f1ee6b7bfb2e7a6ca68f028fe40297bc56fa8287959be7a51545af2a824b
3
+ size 15984
checkpoint-5000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e4716e0a98e24ecef0261fa45d53aa531ce14f99b4992682e0257f7c483a80d
3
+ size 15984
checkpoint-5000/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:732644bf9682f11fe70f041a2575d5822769847a3b56320e6cfdbd3914eb98f9
3
+ size 15984