Wiselnn committed
Commit 4eedc1a · verified · 1 Parent(s): 2aed558

Upload folder using huggingface_hub (#1)


- 38f5b556f06890448db1f33d9d49c9917ed211b0ac3115f85f86b8ed5040ff42 (13cf0f22813c92fd7244e41d967499bb9225ef1b)
- d2d35138218eb2dd3999a9fa4f7f93b5fef9ea7fe1e60270b943d1ba85993fec (6305af35015794574a82daff20d38a67909ccf48)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. vision_niah_d/easy_context/__init__.py +56 -0
  2. vision_niah_d/easy_context/__pycache__/__init__.cpython-310.pyc +0 -0
  3. vision_niah_d/easy_context/__pycache__/low_mem_cross_ent.cpython-310.pyc +0 -0
  4. vision_niah_d/easy_context/__pycache__/modeling_qwen2.cpython-310.pyc +0 -0
  5. vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml +17 -0
  6. vision_niah_d/easy_context/accelerate_configs/single_node.yaml +17 -0
  7. vision_niah_d/easy_context/accelerate_configs/two_node.yaml +16 -0
  8. vision_niah_d/easy_context/accelerate_configs/zero3_offload.json +52 -0
  9. vision_niah_d/easy_context/accelerate_configs/zero3_offload_inference.json +21 -0
  10. vision_niah_d/easy_context/dist_flash_attn/README.md +11 -0
  11. vision_niah_d/easy_context/dist_flash_attn/__pycache__/async_communication.cpython-310.pyc +0 -0
  12. vision_niah_d/easy_context/dist_flash_attn/__pycache__/lightseq_async_attn.cpython-310.pyc +0 -0
  13. vision_niah_d/easy_context/dist_flash_attn/__pycache__/monkey_patch.cpython-310.pyc +0 -0
  14. vision_niah_d/easy_context/dist_flash_attn/__pycache__/prepare_input.cpython-310.pyc +0 -0
  15. vision_niah_d/easy_context/dist_flash_attn/async_communication.py +527 -0
  16. vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn.py +743 -0
  17. vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn_varlen.py +772 -0
  18. vision_niah_d/easy_context/dist_flash_attn/monkey_patch.py +609 -0
  19. vision_niah_d/easy_context/dist_flash_attn/prepare_input.py +36 -0
  20. vision_niah_d/easy_context/low_mem_cross_ent.py +94 -0
  21. vision_niah_d/easy_context/low_mem_cross_ent_tests/test_correctness.py +81 -0
  22. vision_niah_d/easy_context/low_mem_cross_ent_tests/test_mem_and_speed.py +80 -0
  23. vision_niah_d/easy_context/modeling_qwen2.py +1397 -0
  24. vision_niah_d/easy_context/ulysses_attn/__pycache__/monkey_patch.cpython-310.pyc +0 -0
  25. vision_niah_d/easy_context/ulysses_attn/__pycache__/prepare_inputs.cpython-310.pyc +0 -0
  26. vision_niah_d/easy_context/ulysses_attn/monkey_patch.py +110 -0
  27. vision_niah_d/easy_context/ulysses_attn/prepare_inputs.py +45 -0
  28. vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/__pycache__/monkey_patch.cpython-310.pyc +0 -0
  29. vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/monkey_patch.py +94 -0
  30. vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/monkey_patch.cpython-310.pyc +0 -0
  31. vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/prepare_inputs.cpython-310.pyc +0 -0
  32. vision_niah_d/easy_context/zigzag_ring_attn/monkey_patch.py +113 -0
  33. vision_niah_d/easy_context/zigzag_ring_attn/prepare_inputs.py +41 -0
  34. vision_niah_d/eval_debug.sh +47 -0
  35. vision_niah_d/eval_debug_interrupt.sh +44 -0
  36. vision_niah_d/eval_vision_niah.py +552 -0
  37. vision_niah_d/eval_vision_niah_interrupt.py +572 -0
  38. vision_niah_d/needle_datasets/dataset.json +27 -0
  39. vision_niah_d/needle_datasets/dataset_interrupt.json +27 -0
  40. vision_niah_d/needle_datasets/git_placeholder +0 -0
  41. vision_niah_d/needle_datasets/images/astronaut.png +3 -0
  42. vision_niah_d/needle_datasets/images/construction_site.png +3 -0
  43. vision_niah_d/needle_datasets/images/dolphin.png +3 -0
  44. vision_niah_d/needle_datasets/images/llava-next.png +3 -0
  45. vision_niah_d/needle_datasets/images/panda_scientist.png +3 -0
  46. vision_niah_d/needle_datasets/images/panda_scientist_interrupt.png +3 -0
  47. vision_niah_d/needle_datasets/images/selenium_green.jpg +3 -0
  48. vision_niah_d/needle_datasets/images/selenium_green_interrupt.png +3 -0
  49. vision_niah_d/needle_datasets/images/sora_balloon.png +3 -0
  50. vision_niah_d/needle_datasets/images/sora_balloon_interrupt.png +3 -0
vision_niah_d/easy_context/__init__.py ADDED
@@ -0,0 +1,56 @@
+ from .dist_flash_attn.prepare_input import prepare_dist_flash_attn_inputs
+ from .dist_flash_attn.monkey_patch import apply_dist_flash_attn_monkey_patch_llama
+ from .zigzag_ring_attn.prepare_inputs import prepare_zigzag_ring_attn_inputs
+ from .zigzag_ring_attn.monkey_patch import apply_zigzag_ring_attn_monkey_patch_llama
+ from .zigzag_ring_attn.monkey_patch import apply_zigzag_ring_attn_monkey_patch_mistral
+ from .unsloth_offloaded_gradient_checkpoint.monkey_patch import apply_unsloth_offloaded_gradient_checkpoint_monkey_patch
+ from .ulysses_attn.prepare_inputs import prepare_ulysses_attn_inputs
+ from .ulysses_attn.monkey_patch import apply_ulysses_attn_monkey_patch_llama
+ from .modeling_qwen2 import Qwen2ForCausalLM_RingAttn
+ def prepare_seq_parallel_inputs(
+     seq_algo, input_ids, position_ids, target_ids, rank, world_size, device
+ ):
+     if seq_algo == "zigzag_ring_attn":
+         return prepare_zigzag_ring_attn_inputs(
+             input_ids, position_ids, target_ids, rank, world_size, device
+         )
+     elif seq_algo == "dist_flash_attn":
+         return prepare_dist_flash_attn_inputs(
+             input_ids, position_ids, target_ids, rank, world_size, device
+         )
+     elif seq_algo == "ulysses_attn":
+         return prepare_ulysses_attn_inputs(
+             input_ids, position_ids, target_ids, rank, world_size, device
+         )
+     elif seq_algo == "data_parallel":
+         return {
+             "local_input_ids": input_ids.to(device),
+             "local_position_ids": position_ids.to(device),
+             "local_target_ids": target_ids.to(device),
+         }
+     else:
+         raise ValueError(f"Invalid seq_algo: {seq_algo}")
+
+ def apply_seq_parallel_monkey_patch(
+     seq_algo, model
+ ):
+     assert seq_algo in ["zigzag_ring_attn", "dist_flash_attn", "ulysses_attn", "data_parallel"], f"Invalid seq_algo: {seq_algo}"
+     assert model in ["llama", "mistral"], f"Invalid model: {model}"
+     if seq_algo == "data_parallel":
+         return
+     elif seq_algo == "zigzag_ring_attn" and model == "llama":
+         apply_zigzag_ring_attn_monkey_patch_llama()
+     elif seq_algo == "zigzag_ring_attn" and model == "mistral":
+         apply_zigzag_ring_attn_monkey_patch_mistral()
+     elif seq_algo == "dist_flash_attn" and model == "llama":
+         apply_dist_flash_attn_monkey_patch_llama()
+     elif seq_algo == "ulysses_attn" and model == "llama":
+         apply_ulysses_attn_monkey_patch_llama()
+     else:
+         raise ValueError(f"Invalid seq_algo: {seq_algo} or model: {model}")
+
+ def prepare_dataloader(seq_algo, dataloader, accelerator):
+     if seq_algo == "data_parallel":
+         return accelerator.prepare(dataloader)
+     else:
+         return dataloader
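For orientation, a minimal usage sketch of the helpers exported above, assuming a distributed training loop that already knows its `rank` and `world_size` and has a tokenized batch. The tensor shapes, vocabulary size, and device here are illustrative placeholders, not part of this commit.

```python
# Hypothetical usage of easy_context's sequence-parallel helpers (illustration only).
import torch
from easy_context import (
    apply_seq_parallel_monkey_patch,
    prepare_seq_parallel_inputs,
)

seq_algo = "zigzag_ring_attn"  # or "dist_flash_attn", "ulysses_attn", "data_parallel"
apply_seq_parallel_monkey_patch(seq_algo, "llama")  # patch attention before running the model

# Placeholder batch; in practice these come from the dataloader and torch.distributed.
rank, world_size, device = 0, 8, torch.device("cuda:0")
input_ids = torch.randint(0, 32000, (1, 4096))
position_ids = torch.arange(4096).unsqueeze(0)
target_ids = input_ids.clone()

shard = prepare_seq_parallel_inputs(
    seq_algo, input_ids, position_ids, target_ids, rank, world_size, device
)
# shard["local_input_ids"], shard["local_position_ids"], shard["local_target_ids"]
# hold this rank's slice of the sequence, ready to feed to the patched model.
```

For `data_parallel`, `prepare_seq_parallel_inputs` simply moves the full tensors to `device`, and `prepare_dataloader` defers to `accelerator.prepare`.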
vision_niah_d/easy_context/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.1 kB).
 
vision_niah_d/easy_context/__pycache__/low_mem_cross_ent.cpython-310.pyc ADDED
Binary file (3.12 kB).
 
vision_niah_d/easy_context/__pycache__/modeling_qwen2.cpython-310.pyc ADDED
Binary file (39.5 kB).
 
vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml ADDED
@@ -0,0 +1,17 @@
+ compute_environment: LOCAL_MACHINE
+ debug: false
+ deepspeed_config:
+   deepspeed_config_file: easy_context/accelerate_configs/zero3_offload_inference.json
+   zero3_init_flag: false
+ distributed_type: DEEPSPEED
+ downcast_bf16: 'no'
+ machine_rank: 0
+ main_training_function: main
+ num_machines: 1
+ num_processes: 8
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
vision_niah_d/easy_context/accelerate_configs/single_node.yaml ADDED
@@ -0,0 +1,17 @@
+ compute_environment: LOCAL_MACHINE
+ debug: false
+ deepspeed_config:
+   deepspeed_config_file: easy_context/accelerate_configs/zero3_offload.json
+   zero3_init_flag: false
+ distributed_type: DEEPSPEED
+ downcast_bf16: 'no'
+ machine_rank: 0
+ main_training_function: main
+ num_machines: 1
+ num_processes: 8
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
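These Accelerate configs are consumed by the `accelerate` launcher; below is a hedged sketch of how the single-node file above might be used. The training script name is a placeholder, not something defined in this commit.

```python
# Sketch only: when started as
#   accelerate launch --config_file easy_context/accelerate_configs/single_node.yaml your_train_script.py
# the launched script runs under DeepSpeed ZeRO-3 with CPU offload across 8 processes.
from accelerate import Accelerator

accelerator = Accelerator()
print(accelerator.distributed_type)  # DistributedType.DEEPSPEED when launched via the config above
print(accelerator.num_processes)     # 8, per the num_processes field
```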
vision_niah_d/easy_context/accelerate_configs/two_node.yaml ADDED
@@ -0,0 +1,16 @@
+ debug: false
+ deepspeed_config:
+   deepspeed_config_file: easy_context/accelerate_configs/zero3_offload.json
+   deepspeed_multinode_launcher: standard
+   zero3_init_flag: false
+ distributed_type: DEEPSPEED
+ downcast_bf16: 'no'
+ num_machines: 2
+ num_processes: 16
+ main_training_function: main
+ rdzv_backend: c10d
+ same_network: false
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
vision_niah_d/easy_context/accelerate_configs/zero3_offload.json ADDED
@@ -0,0 +1,52 @@
+ {
+     "bf16": {
+         "enabled": "auto"
+     },
+     "fp16": {
+         "enabled": "auto"
+     },
+     "scheduler": {
+         "type": "WarmupLR",
+         "params": {
+             "warmup_min_lr": 1e-5,
+             "warmup_max_lr": 1e-5,
+             "warmup_num_steps": 0,
+             "warmup_type": "linear"
+         }
+     },
+     "optimizer": {
+         "type": "AdamW",
+         "params": {
+             "lr": "auto",
+             "betas": [0.9, 0.95],
+             "eps": 1e-8,
+             "weight_decay": 0.1
+         }
+     },
+     "zero_optimization": {
+         "stage": 3,
+         "offload_optimizer": {
+             "device": "cpu",
+             "pin_memory": true
+         },
+         "offload_param": {
+             "device": "cpu",
+             "pin_memory": true
+         },
+         "overlap_comm": true,
+         "contiguous_gradients": true,
+         "sub_group_size": 1e9,
+         "reduce_bucket_size": "auto",
+         "stage3_prefetch_bucket_size": "auto",
+         "stage3_param_persistence_threshold": "auto",
+         "stage3_max_live_parameters": 1e9,
+         "stage3_max_reuse_distance": 1e9,
+         "stage3_gather_16bit_weights_on_model_save": true
+     },
+     "gradient_accumulation_steps": "auto",
+     "gradient_clipping": "auto",
+     "steps_per_print": 2000,
+     "train_batch_size": "auto",
+     "train_micro_batch_size_per_gpu": 1,
+     "wall_clock_breakdown": false
+ }
vision_niah_d/easy_context/accelerate_configs/zero3_offload_inference.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "bf16": {
+         "enabled": "auto"
+     },
+     "fp16": {
+         "enabled": "auto"
+     },
+     "zero_optimization": {
+         "stage": 3,
+         "stage3_prefetch_bucket_size": 33554432,
+         "stage3_param_persistence_threshold": 4096,
+         "stage3_max_live_parameters": 33554432,
+         "offload_param": {
+             "device": "cpu",
+             "pin_memory": true
+         }
+     },
+     "train_batch_size": 8,
+     "train_micro_batch_size_per_gpu": 1,
+     "wall_clock_breakdown": false
+ }
vision_niah_d/easy_context/dist_flash_attn/README.md ADDED
@@ -0,0 +1,11 @@
+ # LightSeq
+ Taken from https://github.com/RulinShao/LightSeq. All credits to the authors.
+
+ ```
+ @article{li2023lightseq,
+   title={LightSeq: Sequence Level Parallelism for Distributed Training of Long Context Transformers},
+   author={Li, Dacheng and Shao, Rulin and Xie, Anze and Xing, Eric P. and Gonzalez, Joseph E. and Stoica, Ion and Ma, Xuezhe and Zhang, Hao},
+   journal={arXiv preprint arXiv:2310.03294},
+   year={2023}
+ }
+ ```
vision_niah_d/easy_context/dist_flash_attn/__pycache__/async_communication.cpython-310.pyc ADDED
Binary file (11.7 kB).
 
vision_niah_d/easy_context/dist_flash_attn/__pycache__/lightseq_async_attn.cpython-310.pyc ADDED
Binary file (20.9 kB).
 
vision_niah_d/easy_context/dist_flash_attn/__pycache__/monkey_patch.cpython-310.pyc ADDED
Binary file (15.4 kB).
 
vision_niah_d/easy_context/dist_flash_attn/__pycache__/prepare_input.cpython-310.pyc ADDED
Binary file (748 Bytes).
 
vision_niah_d/easy_context/dist_flash_attn/async_communication.py ADDED
@@ -0,0 +1,527 @@
1
+ import threading
2
+ import math
3
+ import os
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ from torch.distributed import batch_isend_irecv, P2POp, isend, irecv
8
+
9
+ # Sequence parallel group that the current rank belongs to.
10
+ _SEQUENCE_PARALLEL_GROUP = None
11
+
12
+ # These values enable us to change the sequence parallel sizes on the fly.
13
+ _SEQUENCE_PARALLEL_SIZE = None
14
+ _SEQUENCE_PARALLEL_RANK = None
15
+
16
+ # Global buffer for P2P
17
+ _PEER_Q = None
18
+ _PEER_K = None
19
+ _PEER_V = None
20
+ _PEER_M = None
21
+ _PEER_L = None
22
+ _PEER_O = None
23
+ _PEER_Q_BWD = None
24
+ _PEER_K_BWD = None
25
+ _PEER_V_BWD = None
26
+ _PEER_O_BWD = None
27
+
28
+ _DELTA_DQ = None
29
+ _PEER_L = None
30
+ _DELTA_DK = None
31
+ _DELTA_DV = None
32
+ _DK_DELTA_FROM_PEER = None
33
+ _DV_DELTA_FROM_PEER = None
34
+ _PEER_DO = None
35
+
36
+
37
+ _fwd_send_volume = 0
38
+ _fwd_recv_volume = 0
39
+ _bwd_send_volume = 0
40
+ _bwd_recv_volume = 0
41
+
42
+ def initialize_distributed():
43
+ if dist.is_initialized():
44
+ if dist.get_rank() == 0:
45
+ print(
46
+ "torch distributed is already initialized, "
47
+ "skipping initialization ...",
48
+ flush=True,
49
+ )
50
+ else:
51
+ if int(os.environ["RANK"]) == 0:
52
+ print("Initializing Torch distributed.")
53
+ dist.init_process_group(backend="nccl")
54
+ local_world_size = int(os.environ["LOCAL_WORLD_SIZE"])
55
+ global_world_size = dist.get_world_size()
56
+ torch.cuda.set_device(dist.get_rank() % local_world_size)
57
+
58
+ _initialize_sequence_parallel()
59
+ # create_nccl_communicators()
60
+
61
+ def _initialize_sequence_parallel(sequence_parallel_size=None):
62
+ # Get world size and rank. Ensure some consistencies.
63
+ assert sequence_parallel_size is None, "Multiple sequence parallel group not implemented."
64
+ assert torch.distributed.is_initialized()
65
+ world_size: int = torch.distributed.get_world_size()
66
+
67
+ if sequence_parallel_size is None:
68
+ sequence_parallel_size = world_size
69
+ else:
70
+ assert world_size % sequence_parallel_size == 0
71
+ num_sequence_parallel_groups: int = world_size // sequence_parallel_size
72
+
73
+ rank = torch.distributed.get_rank()
74
+
75
+ # Build the sequence parallel groups.
76
+ global _SEQUENCE_PARALLEL_GROUP
77
+ global _SEQUENCE_PARALLEL_RANK
78
+ global _SEQUENCE_PARALLEL_SIZE
79
+
80
+ assert (
81
+ _SEQUENCE_PARALLEL_GROUP is None
82
+ ), 'sequence parallel group is already initialized'
83
+ for i in range(num_sequence_parallel_groups):
84
+ ranks = range(i * sequence_parallel_size, (i + 1) * sequence_parallel_size)
85
+ group = torch.distributed.new_group(ranks)
86
+ if rank in ranks:
87
+ _SEQUENCE_PARALLEL_GROUP = group
88
+ _SEQUENCE_PARALLEL_RANK = ranks.index(rank)
89
+ _SEQUENCE_PARALLEL_SIZE = len(ranks)
90
+
91
+ if dist.get_rank() == 0:
92
+ print("************ Finish sequence pralell group Initialization. ***********")
93
+ # _set_global_memory_buffer()
94
+
95
+ def maybe_get_set_global_memory_buffer(q, k, v, m, l, o):
96
+ global _PEER_Q, _PEER_K, _PEER_V, _PEER_M, _PEER_L, _PEER_O
97
+ if _PEER_Q is None:
98
+ try:
99
+ if get_sequence_parallel_rank() == 0:
100
+ print("Initializing global memoery buffer.")
101
+ except:
102
+ print("Initializing global memoery buffer.")
103
+ _PEER_Q = [torch.empty_like(q) for _ in range(2)]
104
+ _PEER_K = [torch.empty_like(k) for _ in range(2)]
105
+ _PEER_V = [torch.empty_like(v) for _ in range(2)]
106
+ _PEER_M = [torch.empty_like(m) for _ in range(2)]
107
+ _PEER_L = [torch.empty_like(l) for _ in range(2)]
108
+ _PEER_O = [torch.empty_like(o) for _ in range(2)]
109
+
110
+ return _PEER_Q, _PEER_K, _PEER_V, _PEER_M, _PEER_L, _PEER_O
111
+
112
+ def maybe_get_set_global_memory_buffer_bwd(dq, dk, dv, q, L, k, v, o, do):
113
+ global _DELTA_DQ, _DELTA_DK, _DELTA_DV, _DK_DELTA_FROM_PEER, _DV_DELTA_FROM_PEER,_PEER_Q_BWD, _PEER_L, _PEER_K_BWD, _PEER_V_BWD, _PEER_O_BWD, _PEER_DO
114
+ if _DELTA_DQ is None:
115
+ try:
116
+ if get_sequence_parallel_rank() == 0:
117
+ print("Initializing global memoery buffer for backward.")
118
+ except:
119
+ print("Initializing global memoery buffer for backward.")
120
+ _DELTA_DQ = [torch.empty_like(dq) for _ in range(2)]
121
+ _DELTA_DK = [torch.empty_like(dk) for _ in range(2)]
122
+ _DELTA_DV = [torch.empty_like(dv) for _ in range(2)]
123
+ _PEER_L = [torch.empty_like(L) for _ in range(2)]
124
+
125
+ _DK_DELTA_FROM_PEER = torch.empty_like(dk)
126
+ _DV_DELTA_FROM_PEER = torch.empty_like(dv)
127
+
128
+ # may already be initialized in the forward call.
129
+ # current forward and backward needs a transpose in q's format
130
+ _PEER_Q_BWD = [torch.empty_like(q) for _ in range(2)]
131
+ _PEER_K_BWD = [torch.empty_like(k) for _ in range(2)]
132
+ _PEER_V_BWD = [torch.empty_like(v) for _ in range(2)]
133
+ _PEER_O_BWD = [torch.empty_like(o) for _ in range(2)]
134
+
135
+ _PEER_DO = [torch.empty_like(do) for _ in range(2)]
136
+
137
+ return _DELTA_DQ, _DELTA_DK, _DELTA_DV, _DK_DELTA_FROM_PEER, _DV_DELTA_FROM_PEER, _PEER_Q_BWD, _PEER_L, _PEER_K_BWD, _PEER_V_BWD, _PEER_O_BWD, _PEER_DO
138
+
139
+ def reset_global_memory_buffer():
140
+ global _PEER_Q, _PEER_K, _PEER_V, _PEER_M, _PEER_L, _PEER_O, _DELTA_DQ, _PEER_L, _DELTA_DK, _DELTA_DV, _DK_DELTA_FROM_PEER, _DV_DELTA_FROM_PEER, _PEER_DO
141
+ _PEER_Q = None
142
+ _PEER_K = None
143
+ _PEER_V = None
144
+ _PEER_M = None
145
+ _PEER_L = None
146
+ _PEER_O = None
147
+
148
+ _DELTA_DQ = None
149
+ _PEER_L = None
150
+ _DELTA_DK = None
151
+ _DELTA_DV = None
152
+ _DK_DELTA_FROM_PEER = None
153
+ _DV_DELTA_FROM_PEER = None
154
+ _PEER_DO = None
155
+
156
+ # Pytorch defers the creation of nccl communicators to the first P2P call,
157
+ # We manually create them so the first isend does not hang without an irecv.
158
+ # reference: https://github.com/pytorch/pytorch/blob/main/torch/csrc/cuda/nccl.cpp#L138
159
+ # Only support even number of GPUs.
160
+ def create_nccl_communicators():
161
+ seq_rank = get_sequence_parallel_rank()
162
+ seq_group = get_sequence_parallel_group()
163
+
164
+ empty_tensor = torch.empty(1,).cuda()
165
+ empty_tensor_2 = torch.empty(1,).cuda()
166
+ if torch.distributed.get_rank() % 2 == 0:
167
+ # sender
168
+ op1 = P2POp(op=isend, tensor=torch.empty(1,).cuda(), peer=seq_rank+1, group=seq_group)
169
+ op2 = P2POp(op=irecv, tensor=torch.empty(1,).cuda(), peer=seq_rank+1, group=seq_group)
170
+ #req = torch.distributed.isend(tensor=empty_tensor, dst=seq_rank + 1, group=seq_group)
171
+ dist.batch_isend_irecv([op1, op2])
172
+ else:
173
+ # receiver
174
+ op1 = P2POp(op=irecv, tensor=torch.empty(1,).cuda(), peer=seq_rank-1, group=seq_group)
175
+ op2 = P2POp(op=isend, tensor=torch.empty(1,).cuda(), peer=seq_rank-1, group=seq_group)
176
+ #req = torch.distributed.isend(tensor=empty_tensor, dst=seq_rank + 1, group=seq_group)
177
+ handles = dist.batch_isend_irecv([op1, op2])
178
+ #req = torch.distributed.irecv(tensor=empty_tensor, src=seq_rank - 1, group=seq_group)
179
+ dist.all_reduce(empty_tensor, group=seq_group)
180
+
181
+ def get_sequence_parallel_group():
182
+ """Get the sequence parallel group the caller rank belongs to."""
183
+ #global _SEQUENCE_PARALLEL_GROUP
184
+ assert (
185
+ _SEQUENCE_PARALLEL_GROUP is not None
186
+ ), 'sequence parallel group is not initialized'
187
+ return _SEQUENCE_PARALLEL_GROUP
188
+
189
+ def get_sequence_parallel_rank():
190
+ """Return my rank for the sequence parallel group."""
191
+ global _SEQUENCE_PARALLEL_RANK
192
+ if _SEQUENCE_PARALLEL_RANK is not None:
193
+ return _SEQUENCE_PARALLEL_RANK
194
+ return torch.distributed.get_rank(group=get_sequence_parallel_group())
195
+
196
+ def get_sequence_parallel_size():
197
+ """Return my rank for the sequence parallel group."""
198
+ global _SEQUENCE_PARALLEL_SIZE
199
+ if _SEQUENCE_PARALLEL_SIZE is not None:
200
+ return _SEQUENCE_PARALLEL_SIZE
201
+ return torch.distributed.get_world_size(group=get_sequence_parallel_group())
202
+
203
+ def destroy_sequence_parallel():
204
+ """Set the groups to none."""
205
+ global _SEQUENCE_PARALLEL_GROUP
206
+ _SEQUENCE_PARALLEL_GROUP = None
207
+
208
+ # whether this is the last time the kernel being called
209
+ def is_last_time(time_step):
210
+ # e.g. on a 8-GPU setup:
211
+ # R=0: 0
212
+ # R=1: 1
213
+ # R=2: 2
214
+ # R=3: 3
215
+ # R=4: 4, 5, 6, 7
216
+ seq_rank = get_sequence_parallel_rank()
217
+ seq_world_size = get_sequence_parallel_size()
218
+ if seq_rank <= seq_world_size // 2: # no one helps these ranks
219
+ rank_finish_time = seq_rank
220
+ else:
221
+ rank_finish_time = seq_world_size // 2
222
+ return rank_finish_time == time_step
223
+
224
+ # Whether the current time step is computing for local q
225
+ def is_compute_for_local_query(time_step):
226
+ # R=3,4,5,6,7: Yes
227
+ # R=0: 0
228
+ # R=1: 0, 1
229
+ # R=2: 0, 1, 2
230
+ seq_rank = get_sequence_parallel_rank()
231
+ seq_world_size = get_sequence_parallel_size()
232
+ if seq_rank >= min(seq_world_size // 2, time_step):
233
+ return True
234
+ return False
235
+
236
+ # Whether the current time step is idle
237
+ def is_idle(time_step):
238
+ # 0, 1, 2, 3: 4
239
+ # 4, 5, 6, 7: No
240
+ seq_rank = get_sequence_parallel_rank()
241
+ seq_world_size = get_sequence_parallel_size()
242
+
243
+ if seq_rank < (seq_world_size // 2) and time_step == seq_world_size // 2:
244
+ return True
245
+ return False
246
+
247
+ # Whether the current time step needs to synchronize with a remote computed result
248
+ def is_sync_from_remote(time_step):
249
+ # R=0, 1, 2, 3, 4: No
250
+ # R=5: 4
251
+ # R=6: 3, 4
252
+ # R=7: 2, 3, 4
253
+ seq_rank = get_sequence_parallel_rank()
254
+ seq_world_size = get_sequence_parallel_size()
255
+ if seq_rank > max(seq_world_size // 2, seq_world_size - time_step):
256
+ return True
257
+ return False
258
+
259
+ def maybe_send_recv_fwd_qkvo(q: torch.Tensor, peer_q: torch.Tensor,
260
+ k: torch.Tensor, peer_k: torch.Tensor,
261
+ v: torch.Tensor, peer_v: torch.Tensor,
262
+ o_stats: list,# peer_o_stats: list,
263
+ time_step: int, comm_mode, debug=False) -> torch.Tensor:
264
+
265
+ seq_group = get_sequence_parallel_group()
266
+ seq_rank = get_sequence_parallel_rank()
267
+ seq_world_size = get_sequence_parallel_size()
268
+
269
+ # Handles for operations that actually need to be wait before going to the next iteration.
270
+ # For instance, QKV sender never needs to wait -> it seems fusing these calls help scheduler;
271
+ all_handles = []
272
+ # KV logic: different than older version, every rank to send/recv its own kv,
273
+ # to balance communication. In a balanced communication, every step each rank
274
+ # should send/recv 4 tensors in total (kv, or qo). For instance, rank 0 when
275
+ # time step > 0, should send its own kv and send/recv qo. In the older version,
276
+ # rank 0 does not send its kv, and rely on a later rank to pass it, where the
277
+ # later rank has to (1) receive kv, send rank 0's kv and send/recv qo.
278
+ # Q (load balancing) logic: semantically, this will be "%" world size, so
279
+ # the same send/recv rank as KV. Note: Only support even number of machines.
280
+ # O (load balancing) logic: rank 0 sends result to rank 7 at time 1.
281
+ # It get delayed for one time step, and thus has different maybe_send/recv_rank.
282
+ # Use (time_step + 1) to easily convert to the synchronized version.
283
+ maybe_send_rank = seq_rank + (time_step + 1)
284
+ maybe_recv_rank = seq_rank - (time_step + 1)
285
+
286
+ if debug:
287
+ global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume
288
+ _debug_send = _fwd_send_volume
289
+ _debug_recv = _fwd_recv_volume
290
+
291
+ if maybe_send_rank >= seq_world_size:
292
+ #send q, no one needs to do remote computation in the last time step
293
+ if time_step < (seq_world_size // 2 - 1):
294
+ #print(f"t={time_step}: R={seq_rank} sends q to {maybe_send_rank % seq_world_size} (not wait)")
295
+ #q_send_handles.append(P2POp(op=isend, tensor=q, peer=maybe_send_rank % seq_world_size, group=seq_group))
296
+ all_handles.append(P2POp(op=isend, tensor=q, peer=maybe_send_rank % seq_world_size, group=seq_group))
297
+ if debug:
298
+ _fwd_send_volume += torch.numel(q) * q.element_size()
299
+ else:
300
+ # send kv
301
+ #print(f"t={time_step}: R={seq_rank} sends kv to {maybe_send_rank} (not wait)")
302
+ #kv_send_handles.append(P2POp(op=isend, tensor=k, peer=maybe_send_rank, group=seq_group))
303
+ #kv_send_handles.append(P2POp(op=isend, tensor=v, peer=maybe_send_rank, group=seq_group))
304
+ all_handles.append(P2POp(op=isend, tensor=k, peer=maybe_send_rank, group=seq_group))
305
+ all_handles.append(P2POp(op=isend, tensor=v, peer=maybe_send_rank, group=seq_group))
306
+ if debug:
307
+ _fwd_send_volume += torch.numel(k) * k.element_size()
308
+ _fwd_send_volume += torch.numel(v) * v.element_size()
309
+
310
+ if maybe_recv_rank < 0:
311
+ # recv q, no one needs to do remote computation in the last time step
312
+ if time_step < (seq_world_size // 2 - 1):
313
+ # print(f"t={time_step}: R={seq_rank} receives q from {maybe_recv_rank % seq_world_size} (wait)")
314
+ #q_recv_handles.append(P2POp(op=irecv, tensor=peer_q, peer=maybe_recv_rank % seq_world_size, group=seq_group))
315
+ all_handles.append(P2POp(op=irecv, tensor=peer_q, peer=maybe_recv_rank % seq_world_size, group=seq_group))
316
+ if debug:
317
+ _fwd_recv_volume += torch.numel(peer_q) * peer_q.element_size()
318
+ else:
319
+ # recv kv
320
+ #print(f"t={time_step}: R={seq_rank} receivs kv from {maybe_recv_rank} (wait)")
321
+ #kv_recv_handles.append(P2POp(op=irecv, tensor=peer_k, peer=maybe_recv_rank, group=seq_group))
322
+ #kv_recv_handles.append(P2POp(op=irecv, tensor=peer_v, peer=maybe_recv_rank, group=seq_group))
323
+ all_handles.append(P2POp(op=irecv, tensor=peer_k, peer=maybe_recv_rank, group=seq_group))
324
+ all_handles.append(P2POp(op=irecv, tensor=peer_v, peer=maybe_recv_rank, group=seq_group))
325
+ if debug:
326
+ _fwd_recv_volume += torch.numel(peer_k) * peer_k.element_size()
327
+ _fwd_recv_volume += torch.numel(peer_v) * peer_v.element_size()
328
+
329
+ maybe_send_rank_o = seq_rank - (time_step - 1)
330
+ maybe_recv_rank_o = seq_rank + (time_step - 1)
331
+ if maybe_send_rank_o < 0 and time_step > 1:
332
+ for t in o_stats:
333
+ # print(f"t={time_step}: R={seq_rank} sends o to {maybe_send_rank_o % seq_world_size} (wait)")
334
+ #o_send_handles.append(P2POp(op=isend, tensor=t, peer=maybe_send_rank_o % seq_world_size, group=seq_group))
335
+ all_handles.append(P2POp(op=isend, tensor=t, peer=maybe_send_rank_o % seq_world_size, group=seq_group))
336
+ if debug:
337
+ _fwd_send_volume += torch.numel(t) * t.element_size()
338
+ if maybe_recv_rank_o >= seq_world_size and time_step > 1 :
339
+ for t in o_stats:
340
+ # print(f"t={time_step}: R={seq_rank} receives o from {maybe_recv_rank_o % seq_world_size} (wait)")
341
+ #o_recv_handles.append(P2POp(op=irecv, tensor=t, peer=maybe_recv_rank_o % seq_world_size, group=seq_group))
342
+ all_handles.append(P2POp(op=irecv, tensor=t, peer=maybe_recv_rank_o % seq_world_size, group=seq_group))
343
+ if debug:
344
+ _fwd_recv_volume += torch.numel(t) * t.element_size()
345
+
346
+ #reqs = []
347
+
348
+ if debug:
349
+ if seq_rank in [0, 8]:
350
+ print(f"R={seq_rank} time_step={time_step} increases: send {(_fwd_send_volume - _debug_send) * 1e-9} GB recv {(_fwd_recv_volume - _debug_recv) * 1e-9} GB")
351
+ #return reqs
352
+ all_reqs = launch_async_handles(all_handles, comm_mode)
353
+ return [all_reqs]
354
+
355
+ # delta: may be you are using it for your local compute or as a distributed buffer to send to others
356
+ # .. Sorry for the bad naming..
357
+ def maybe_send_recv_bwd_qkvo(dq_delta: torch.Tensor, dk_delta: torch.Tensor,
358
+ dv_delta: torch.Tensor, dk_delta_from_peer: torch.Tensor,
359
+ dv_delta_from_peer: torch.Tensor, q: torch.Tensor,
360
+ peer_q: torch.Tensor, L: torch.Tensor,
361
+ peer_L: torch.Tensor, k: torch.Tensor,
362
+ peer_k: torch.Tensor, v: torch.Tensor,
363
+ peer_v: torch.Tensor, o: torch.Tensor,
364
+ peer_o: torch.Tensor, do: torch.Tensor,
365
+ peer_do: torch.Tensor, time_step: int, comm_mode, debug=False):
366
+
367
+ seq_group = get_sequence_parallel_group()
368
+ seq_rank = get_sequence_parallel_rank()
369
+ seq_world_size = get_sequence_parallel_size()
370
+
371
+ all_handles = []
372
+ maybe_send_rank = seq_rank + (time_step + 1)
373
+ maybe_recv_rank = seq_rank - (time_step + 1)
374
+
375
+ if debug:
376
+ global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume
377
+
378
+ if maybe_send_rank >= seq_world_size:
379
+ #send q, no one needs to do remote computation in the last time step
380
+ if time_step < (seq_world_size // 2 - 1):
381
+ all_handles.append(P2POp(op=isend, tensor=q, peer=maybe_send_rank % seq_world_size, group=seq_group))
382
+ all_handles.append(P2POp(op=isend, tensor=L, peer=maybe_send_rank % seq_world_size, group=seq_group))
383
+ all_handles.append(P2POp(op=isend, tensor=o, peer=maybe_send_rank % seq_world_size, group=seq_group))
384
+ all_handles.append(P2POp(op=isend, tensor=do, peer=maybe_send_rank % seq_world_size, group=seq_group))
385
+ if debug:
386
+ _bwd_send_volume += torch.numel(q) * q.element_size()
387
+ _bwd_send_volume += torch.numel(L) * L.element_size()
388
+ _bwd_send_volume += torch.numel(o) * o.element_size()
389
+ _bwd_send_volume += torch.numel(do) * do.element_size()
390
+ else:
391
+ # send kv
392
+ all_handles.append(P2POp(op=isend, tensor=k, peer=maybe_send_rank, group=seq_group))
393
+ all_handles.append(P2POp(op=isend, tensor=v, peer=maybe_send_rank, group=seq_group))
394
+ if debug:
395
+ _bwd_send_volume += torch.numel(k) * k.element_size()
396
+ _bwd_send_volume += torch.numel(v) * v.element_size()
397
+
398
+ if maybe_recv_rank < 0:
399
+ # recv q, no one needs to do remote computation in the last time step
400
+ if time_step < (seq_world_size // 2 - 1):
401
+ all_handles.append(P2POp(op=irecv, tensor=peer_q, peer=maybe_recv_rank % seq_world_size, group=seq_group))
402
+ all_handles.append(P2POp(op=irecv, tensor=peer_L, peer=maybe_recv_rank % seq_world_size, group=seq_group))
403
+ all_handles.append(P2POp(op=irecv, tensor=peer_o, peer=maybe_recv_rank % seq_world_size, group=seq_group))
404
+ all_handles.append(P2POp(op=irecv, tensor=peer_do, peer=maybe_recv_rank % seq_world_size, group=seq_group))
405
+ if debug:
406
+ _bwd_recv_volume += torch.numel(peer_q) * peer_q.element_size()
407
+ _bwd_recv_volume += torch.numel(peer_L) * peer_L.element_size()
408
+ _bwd_recv_volume += torch.numel(peer_o) * peer_o.element_size()
409
+ _bwd_recv_volume += torch.numel(peer_do) * peer_do.element_size()
410
+ else:
411
+ # recv kv
412
+ all_handles.append(P2POp(op=irecv, tensor=peer_k, peer=maybe_recv_rank, group=seq_group))
413
+ all_handles.append(P2POp(op=irecv, tensor=peer_v, peer=maybe_recv_rank, group=seq_group))
414
+ if debug:
415
+ _bwd_recv_volume += torch.numel(peer_k) * peer_k.element_size()
416
+ _bwd_recv_volume += torch.numel(peer_v) * peer_v.element_size()
417
+
418
+ # Whether I should update dq, dk and dv after waiting these requests
419
+ is_update_dq = False
420
+ is_update_dkv = False
421
+
422
+ maybe_send_rank_dqkv = seq_rank - (time_step - 1)
423
+ maybe_recv_rank_dqkv = seq_rank + (time_step - 1)
424
+
425
+ if time_step > 1:
426
+ if maybe_send_rank_dqkv < 0:
427
+ #print(f"BWD t={time_step}: R={seq_rank} sends dq delta to {maybe_send_rank_dqkv % seq_world_size}")
428
+ all_handles.append(P2POp(op=isend, tensor=dq_delta, peer=maybe_send_rank_dqkv % seq_world_size, group=seq_group))
429
+ if debug:
430
+ _bwd_send_volume += torch.numel(dq_delta) * dq_delta.element_size()
431
+ else:
432
+ #print(f"BWD t={time_step}: R={seq_rank} sends dkv delta to {maybe_send_rank_dqkv}")
433
+ all_handles.append(P2POp(op=isend, tensor=dk_delta, peer=maybe_send_rank_dqkv, group=seq_group))
434
+ all_handles.append(P2POp(op=isend, tensor=dv_delta, peer=maybe_send_rank_dqkv, group=seq_group))
435
+ if debug:
436
+ _bwd_send_volume += torch.numel(dk_delta) * dk_delta.element_size()
437
+ _bwd_send_volume += torch.numel(dv_delta) * dv_delta.element_size()
438
+
439
+ if maybe_recv_rank_dqkv >= seq_world_size:
440
+ #print(f"BWD t={time_step}: R={seq_rank} receives dq delta to {maybe_recv_rank_dqkv % seq_world_size}")
441
+ all_handles.append(P2POp(op=irecv, tensor=dq_delta, peer=maybe_recv_rank_dqkv % seq_world_size, group=seq_group))
442
+ is_update_dq = True
443
+ if debug:
444
+ _bwd_recv_volume += torch.numel(dq_delta) * dq_delta.element_size()
445
+ else:
446
+ #print(f"BWD t={time_step}: R={seq_rank} receives dk dv delta from {maybe_recv_rank_dqkv}")
447
+ all_handles.append(P2POp(op=irecv, tensor=dk_delta_from_peer, peer=maybe_recv_rank_dqkv, group=seq_group))
448
+ all_handles.append(P2POp(op=irecv, tensor=dv_delta_from_peer, peer=maybe_recv_rank_dqkv, group=seq_group))
449
+ is_update_dkv = True
450
+ if debug:
451
+ _bwd_recv_volume += torch.numel(dk_delta_from_peer) * dk_delta_from_peer.element_size()
452
+ _bwd_recv_volume += torch.numel(dv_delta_from_peer) * dv_delta_from_peer.element_size()
453
+
454
+ # return [], is_update_dq, is_update_dkv
455
+ all_reqs = launch_async_handles(all_handles, comm_mode)
456
+ return [all_reqs], is_update_dq, is_update_dkv
457
+
458
+ def maybe_send_recv_bwd_last_dkv(dk_delta: torch.Tensor, dv_delta: torch.Tensor, time_step, comm_mode, debug=False):
459
+ is_update_last_dkv = False
460
+
461
+ seq_group = get_sequence_parallel_group()
462
+ seq_rank = get_sequence_parallel_rank()
463
+ seq_world_size = get_sequence_parallel_size()
464
+
465
+ if seq_world_size == 1: return [], is_update_last_dkv
466
+
467
+ all_handles = []
468
+
469
+ if debug:
470
+ global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume
471
+
472
+ if time_step == seq_world_size // 2:
473
+ maybe_send_rank = seq_rank - time_step
474
+ maybe_recv_rank = seq_rank + time_step
475
+
476
+ assert (maybe_send_rank >= 0) ^ (maybe_recv_rank < seq_world_size), f"R={seq_rank} should be either sending or receiving dkv in the last time step."
477
+
478
+ if maybe_send_rank >= 0:
479
+ # print(f"BWD t={time_step}: R={seq_rank} last send dkv to {maybe_send_rank}")
480
+ all_handles.append(P2POp(op=isend, tensor=dk_delta, peer=maybe_send_rank, group=seq_group))
481
+ all_handles.append(P2POp(op=isend, tensor=dv_delta, peer=maybe_send_rank, group=seq_group))
482
+ if debug:
483
+ _bwd_send_volume += torch.numel(dk_delta) * dk_delta.element_size()
484
+ _bwd_send_volume += torch.numel(dv_delta) * dv_delta.element_size()
485
+ if maybe_recv_rank < seq_world_size:
486
+ # print(f"BWD t={time_step}: R={seq_rank} last receive dkv from {maybe_recv_rank}")
487
+ all_handles.append(P2POp(op=irecv, tensor=dk_delta, peer=maybe_recv_rank, group=seq_group))
488
+ all_handles.append(P2POp(op=irecv, tensor=dv_delta, peer=maybe_recv_rank, group=seq_group))
489
+ if debug:
490
+ _bwd_recv_volume += torch.numel(dk_delta) * dk_delta.element_size()
491
+ _bwd_recv_volume += torch.numel(dv_delta) * dv_delta.element_size()
492
+ is_update_last_dkv = True
493
+
494
+ # return [], is_update_last_dkv
495
+ all_reqs = launch_async_handles(all_handles, comm_mode)
496
+
497
+ return [all_reqs], is_update_last_dkv
498
+
499
+ def print_and_reset_comm_stats():
500
+ seq_rank = get_sequence_parallel_rank()
501
+
502
+ global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume
503
+ _fwd_send_volume *= 1e-9
504
+ _fwd_recv_volume *= 1e-9
505
+ _bwd_send_volume *= 1e-9
506
+ _bwd_recv_volume *= 1e-9
507
+
508
+ print(f"R={seq_rank} fwd send: {_fwd_send_volume} fwd recv: {_fwd_recv_volume}; bwd send: {_bwd_send_volume}, bwd recv: {_bwd_recv_volume} GB.")
509
+ _fwd_send_volume = 0
510
+ _fwd_recv_volume = 0
511
+ _bwd_send_volume = 0
512
+ _bwd_recv_volume = 0
513
+
514
+ def launch_async_handles(handles, comm_mode):
515
+ global _args
516
+ if comm_mode == "nocomm":
517
+ #print("skipping communication for ablation")
518
+ return []
519
+ if len(handles) > 0:
520
+ return dist.batch_isend_irecv(handles)
521
+ return []
522
+
523
+ def wait_async_handles(reqs):
524
+ if len(reqs) > 0:
525
+ for req in reqs:
526
+ for r in req:
527
+ r.wait()
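To make the time-step arithmetic above easier to follow, here is a small self-contained sketch (an illustration written for this commit review, not part of the upload) that replays the forward-pass roles implied by `maybe_send_recv_fwd_qkvo`, `is_compute_for_local_query`, `is_idle`, and `is_sync_from_remote` for an 8-rank group. The delayed output/statistics exchange (`maybe_send_rank_o` / `maybe_recv_rank_o`) is omitted for brevity.

```python
# Explanatory sketch (not part of the commit): replay of the forward schedule encoded by the
# helpers above for an 8-rank sequence-parallel group, using plain arithmetic instead of
# torch.distributed.
WORLD = 8

def forward_roles(rank: int, t: int) -> str:
    actions = []
    send_rank, recv_rank = rank + (t + 1), rank - (t + 1)
    if send_rank >= WORLD:                       # would wrap around: q is exchanged instead of kv
        if t < WORLD // 2 - 1:
            actions.append(f"send q to {send_rank % WORLD}")
    else:
        actions.append(f"send kv to {send_rank}")
    if recv_rank < 0:                            # mirror of the send side
        if t < WORLD // 2 - 1:
            actions.append(f"recv q from {recv_rank % WORLD}")
    else:
        actions.append(f"recv kv from {recv_rank}")
    if rank >= min(WORLD // 2, t):               # is_compute_for_local_query
        actions.append("compute with local q")
    elif rank < WORLD // 2 and t == WORLD // 2:  # is_idle
        actions.append("idle")
    else:                                        # helps an earlier rank
        actions.append("compute for a peer's q")
    if rank > max(WORLD // 2, WORLD - t):        # is_sync_from_remote
        actions.append("merge partial results from remote")
    return ", ".join(actions)

for t in range(WORLD // 2 + 1):
    print(f"time step {t}")
    for r in range(WORLD):
        print(f"  rank {r}: {forward_roles(r, t)}")
```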
vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn.py ADDED
@@ -0,0 +1,743 @@
1
+ import os
2
+ import math
3
+
4
+ from einops import rearrange
5
+ import argparse
6
+
7
+ import pytest
8
+ import torch
9
+ import torch.distributed as dist
10
+ from torch.distributed import ReduceOp
11
+ #from torch.profiler import profile, record_function, ProfilerActivity
12
+ import functools
13
+ import triton
14
+ import triton.language as tl
15
+ import time
16
+ import numpy as np
17
+ from tqdm import tqdm
18
+
19
+ try:
20
+ from flash_attn.flash_attn_interface import _flash_attn_forward, _flash_attn_backward
21
+ except:
22
+ pass
23
+
24
+ from .async_communication import (is_last_time, is_compute_for_local_query, is_sync_from_remote, is_idle, print_and_reset_comm_stats,
25
+ launch_async_handles, wait_async_handles, maybe_send_recv_fwd_qkvo, maybe_send_recv_bwd_qkvo, maybe_send_recv_bwd_last_dkv, reset_global_memory_buffer,
26
+ maybe_get_set_global_memory_buffer, maybe_get_set_global_memory_buffer_bwd, initialize_distributed, get_sequence_parallel_size, get_sequence_parallel_rank)
27
+
28
+ @triton.jit
29
+ def max_fn(x, y):
30
+ return tl.math.max(x, y)
31
+
32
+ @triton.jit
33
+ def _rescale_kernel(
34
+ peer_m,
35
+ m,
36
+ peer_l,
37
+ l,
38
+ peer_o,
39
+ o,
40
+ L,
41
+ stride_oz, stride_oh, stride_om, stride_on,
42
+ Z, H, N_CTX,
43
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
44
+ BLOCK_N: tl.constexpr,
45
+ LAST_STEP: tl.constexpr,
46
+ ):
47
+ start_m = tl.program_id(0)
48
+ off_hz = tl.program_id(1)
49
+ o_offset = off_hz * stride_oh
50
+ peer_o_block_ptr = tl.make_block_ptr(
51
+ base=peer_o + o_offset,
52
+ shape=(N_CTX, BLOCK_DMODEL),
53
+ strides=(stride_om, stride_on),
54
+ offsets=(start_m * BLOCK_M, 0),
55
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
56
+ order=(1, 0)
57
+ )
58
+ o_block_ptr = tl.make_block_ptr(
59
+ base=o + o_offset,
60
+ shape=(N_CTX, BLOCK_DMODEL),
61
+ strides=(stride_om, stride_on),
62
+ offsets=(start_m * BLOCK_M, 0),
63
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
64
+ order=(1, 0)
65
+ )
66
+ # initialize offsets
67
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
68
+ offs_n = tl.arange(0, BLOCK_N)
69
+
70
+ peer_m_ptrs = peer_m + off_hz * N_CTX + offs_m
71
+ m_ptrs = m + off_hz * N_CTX + offs_m
72
+ peer_l_ptrs = peer_l + off_hz * N_CTX + offs_m
73
+ l_ptrs = l + off_hz * N_CTX + offs_m
74
+
75
+ peer_m_i = tl.load(peer_m_ptrs)
76
+ peer_m_i = peer_m_i.to(tl.float32)
77
+ m_i = tl.load(m_ptrs)
78
+ m_i = m_i.to(tl.float32)
79
+ peer_l_i = tl.load(peer_l_ptrs)
80
+ peer_l_i = peer_l_i.to(tl.float32)
81
+ l_i = tl.load(l_ptrs)
82
+ l_i = l_i.to(tl.float32)
83
+
84
+ peer_acc = tl.load(peer_o_block_ptr)
85
+ peer_acc = peer_acc.to(tl.float32)
86
+ acc = tl.load(o_block_ptr)
87
+ acc = acc.to(tl.float32)
88
+ lo = 0
89
+ hi = N_CTX
90
+ m_i_sync = tl.maximum(m_i, peer_m_i)
91
+ alpha = tl.math.exp2(m_i - m_i_sync)
92
+ peer_alpha = tl.math.exp2(peer_m_i - m_i_sync)
93
+ # -- scale and update acc --
94
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
95
+ peer_acc_scale = peer_l_i * 0 + peer_alpha # workaround some compiler bug
96
+
97
+ acc *= acc_scale[:, None]
98
+ peer_acc *= peer_acc_scale[:, None]
99
+ acc += peer_acc
100
+ l_i = l_i * acc_scale + peer_l_i * peer_acc_scale
101
+ # write back O, l, m
102
+ tl.store(m_ptrs, m_i_sync)
103
+ tl.store(l_ptrs, l_i)
104
+ if LAST_STEP:
105
+ acc = acc / l_i[:, None]
106
+ L_ptrs = L + off_hz * N_CTX + offs_m
107
+ tl.store(L_ptrs, m_i_sync / 1.44269504 + tl.math.log(l_i))
108
+ tl.store(o_block_ptr, acc.to(tl.bfloat16))
109
+
110
+ @triton.jit
111
+ def _fwd_kernel(
112
+ Q, K, V, sm_scale,
113
+ m,
114
+ l,
115
+ O,
116
+ L,
117
+ stride_qz, stride_qh, stride_qm, stride_qk,
118
+ stride_kz, stride_kh, stride_kn, stride_kk,
119
+ stride_vz, stride_vh, stride_vk, stride_vn,
120
+ stride_oz, stride_oh, stride_om, stride_on,
121
+ Z, H, N_CTX,
122
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
123
+ BLOCK_N: tl.constexpr,
124
+ IS_CAUSAL: tl.constexpr,
125
+ LAST_STEP: tl.constexpr
126
+ ):
127
+ start_m = tl.program_id(0)
128
+ off_hz = tl.program_id(1)
129
+ qvk_offset = off_hz * stride_qh
130
+ Q_block_ptr = tl.make_block_ptr(
131
+ base=Q + qvk_offset,
132
+ shape=(N_CTX, BLOCK_DMODEL),
133
+ strides=(stride_qm, stride_qk),
134
+ offsets=(start_m * BLOCK_M, 0),
135
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
136
+ order=(1, 0)
137
+ )
138
+ K_block_ptr = tl.make_block_ptr(
139
+ base=K + qvk_offset,
140
+ shape=(BLOCK_DMODEL, N_CTX),
141
+ strides=(stride_kk, stride_kn),
142
+ offsets=(0, 0),
143
+ block_shape=(BLOCK_DMODEL, BLOCK_N),
144
+ order=(0, 1)
145
+ )
146
+ V_block_ptr = tl.make_block_ptr(
147
+ base=V + qvk_offset,
148
+ shape=(N_CTX, BLOCK_DMODEL),
149
+ strides=(stride_vk, stride_vn),
150
+ offsets=(0, 0),
151
+ block_shape=(BLOCK_N, BLOCK_DMODEL),
152
+ order=(1, 0)
153
+ )
154
+ O_block_ptr = tl.make_block_ptr(
155
+ base=O + qvk_offset,
156
+ shape=(N_CTX, BLOCK_DMODEL),
157
+ strides=(stride_om, stride_on),
158
+ offsets=(start_m * BLOCK_M, 0),
159
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
160
+ order=(1, 0)
161
+ )
162
+ # initialize offsets
163
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
164
+ offs_n = tl.arange(0, BLOCK_N)
165
+ # initialize pointer to m and l -> load from provided pointer
166
+ m_ptrs = m + off_hz * N_CTX + offs_m
167
+ l_ptrs = l + off_hz * N_CTX + offs_m
168
+ m_i = tl.load(m_ptrs)
169
+ m_i = m_i.to(tl.float32)
170
+ l_i = tl.load(l_ptrs)
171
+ l_i = l_i.to(tl.float32)
172
+ acc = tl.load(O_block_ptr)
173
+ acc = acc.to(tl.float32)
174
+ # scale sm_scale by log_2(e) and use
175
+ # 2^x instead of exp in the loop because CSE and LICM
176
+ # don't work as expected with `exp` in the loop
177
+ qk_scale = sm_scale * 1.44269504
178
+ # load q: it will stay in SRAM throughout
179
+ q = tl.load(Q_block_ptr)
180
+ q = (q * qk_scale).to(tl.bfloat16)
181
+ # loop over k, v and update accumulator
182
+ lo = 0
183
+ hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX
184
+ for start_n in range(lo, hi, BLOCK_N):
185
+ # -- load k, v --
186
+ k = tl.load(K_block_ptr)
187
+ v = tl.load(V_block_ptr)
188
+ # -- compute qk ---
189
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
190
+ if IS_CAUSAL:
191
+ qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf"))
192
+ qk += tl.dot(q, k)
193
+ # -- compute scaling constant ---
194
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
195
+ alpha = tl.math.exp2(m_i - m_i_new)
196
+ p = tl.math.exp2(qk - m_i_new[:, None])
197
+ # -- scale and update acc --
198
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
199
+ acc *= acc_scale[:, None]
200
+ acc += tl.dot(p.to(tl.bfloat16), v)
201
+ # -- update m_i and l_i --
202
+ l_i = l_i * alpha + tl.sum(p, 1)
203
+ m_i = m_i_new
204
+ # update pointers
205
+ K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
206
+ V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
207
+ # write back original l and m
208
+ tl.store(m_ptrs, m_i)
209
+ tl.store(l_ptrs, l_i)
210
+ # write back O, L
211
+ if LAST_STEP:
212
+ acc = acc / l_i[:, None]
213
+ L_ptrs = L + off_hz * N_CTX + offs_m
214
+ tl.store(L_ptrs, m_i / 1.44269504 + tl.math.log(l_i))
215
+ tl.store(O_block_ptr, acc.to(tl.bfloat16))
216
+
217
+ # for gqa/mqa to expand kv heads
218
+ def maybe_repeat_kv_fwd(nqh, kv):
219
+ bs, nkvh, slen, hdim = kv.shape
220
+ n_rep = nqh // nkvh
221
+ if n_rep == 1:
222
+ return kv
223
+ kv_expand = kv[:, :, None, :, :].expand(bs, nkvh, n_rep, slen, hdim)
224
+ return kv_expand.reshape(bs, nkvh * n_rep, slen, hdim)
225
+
226
+ def maybe_repeat_kv_bwd(nqh, kv):
227
+ bs, slen, nkvh, hdim = kv.shape
228
+ n_rep = nqh // nkvh
229
+ if n_rep == 1:
230
+ return kv
231
+ kv_expand = kv[:, :, :, None, :].expand(bs, slen, nkvh, n_rep, hdim)
232
+ return kv_expand.reshape(bs, slen, nkvh * n_rep, hdim)
233
+
234
+ # kv grad has shape bs, slen, nqh, hdim
235
+ def maybe_reduce_dkv(nkvh, dkv):
236
+ bs, slen, nqh, hdim = dkv.shape
237
+ n_rep = nqh // nkvh
238
+ if n_rep == 1:
239
+ return dkv
240
+ dkv_reshape = dkv.view(bs, slen, nkvh, n_rep, hdim)
241
+ return torch.sum(dkv_reshape, dim=3)
242
+
243
+
244
+ def _lightseq_forward(q, k, v, causal, sm_scale, comm_mode):
245
+ # maybe_contiguous = lambda x: x.contiguous() if x.stride(-1) != 1 else x
246
+ # q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
247
+
248
+ # shape constraints
249
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
250
+ assert Lq == Lk and Lk == Lv
251
+ assert Lk in {16, 32, 64, 128}
252
+ # Why do I have to change it from 128 64 to 32 32?
253
+ BLOCK_M = 32
254
+ BLOCK_N = 32
255
+
256
+ bsz, nh, seq_len, hdim = q.shape
257
+
258
+ m = torch.full((bsz * nh, seq_len), fill_value=-float("inf"), device=q.device, dtype=torch.float32)
259
+ l = torch.zeros_like(m)
260
+ L = torch.zeros_like(m)
261
+ o = torch.zeros_like(q)
262
+
263
+ grid = (triton.cdiv(seq_len, BLOCK_M), bsz * nh, 1)
264
+ num_warps = 4 if Lk <= 64 else 8
265
+
266
+ seq_rank = get_sequence_parallel_rank()
267
+ seq_world_size = get_sequence_parallel_size()
268
+
269
+ # Initialize all buffers
270
+ peer_q, peer_k, peer_v, peer_m, peer_l, peer_o = maybe_get_set_global_memory_buffer(q, k, v, m, l, o)
271
+
272
+ fwd_launch_helper = lambda q, k, v, m, l, o, L, IS_CAUSAL, LAST_STEP: _fwd_kernel[grid](
273
+ q, k, v, sm_scale,
274
+ m,
275
+ l,
276
+ o,
277
+ L,
278
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
279
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
280
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
281
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
282
+ q.shape[0], q.shape[1], q.shape[2],
283
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
284
+ IS_CAUSAL=IS_CAUSAL,
285
+ LAST_STEP=LAST_STEP,
286
+ num_warps=num_warps,
287
+ num_stages=4)
288
+
289
+ for time_step in range(seq_world_size // 2 + 1):
290
+ # This is important for cuda scheduler to execute nccl calls first.
291
+ torch.cuda.synchronize()
292
+ # Communication uses buffer_idx_1, and compute uses buffer_idx_2, which effectively are contents from the last time step.
293
+ buffer_idx_1 = time_step % 2
294
+ buffer_idx_2 = (time_step - 1) % 2
295
+
296
+ reqs = maybe_send_recv_fwd_qkvo(q, peer_q[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1],
297
+ [peer_o[buffer_idx_1], peer_m[buffer_idx_1], peer_l[buffer_idx_1]], time_step, comm_mode)
298
+ if comm_mode == "sync":
299
+ # if seq_rank == 0:
300
+ # print("Immediate wait for abalation")
301
+ wait_async_handles(reqs)
302
+ if is_compute_for_local_query(time_step):
303
+ # print(f"t={time_step}: (Comp) R={seq_rank} local compute")
304
+ if time_step == 0:
305
+ fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), m, l, o, L, True, is_last_time(time_step))
306
+ else:
307
+ # if needs to sync from others, do not normalize here
308
+ fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], peer_k[buffer_idx_2]), maybe_repeat_kv_fwd(q.shape[1], peer_v[buffer_idx_2]), m, l, o, L, False, not is_sync_from_remote(time_step) and is_last_time(time_step))
309
+ elif is_idle(time_step):
310
+ # print(f"t={time_step}: (Comp) R={seq_rank} idle")
311
+ pass
312
+ else:
313
+ # print(f"t={time_step}: (Comp) R={seq_rank} helps other")
314
+ peer_m[buffer_idx_2] = torch.full_like(m, fill_value=-float("inf"))
315
+ peer_l[buffer_idx_2] = torch.zeros_like(l)
316
+ peer_o[buffer_idx_2] = torch.zeros_like(o)
317
+
318
+ #print(f"rank 3 q is: {peer_q[buffer_idx_2]}")
319
+ fwd_launch_helper(peer_q[buffer_idx_2], maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), peer_m[buffer_idx_2], peer_l[buffer_idx_2], peer_o[buffer_idx_2], None, False, False)
320
+
321
+ if comm_mode == "lightseq":
322
+ # Make sure tensors for next steps are ready
323
+ wait_async_handles(reqs)
324
+ # sync between statistics get from other ranks and the local ones
325
+ if is_sync_from_remote(time_step):
326
+ _rescale_kernel[grid](
327
+ peer_m[buffer_idx_1],
328
+ m,
329
+ peer_l[buffer_idx_1],
330
+ l,
331
+ peer_o[buffer_idx_1],
332
+ o,
333
+ L,
334
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
335
+ o.shape[0], o.shape[1], o.shape[2],
336
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
337
+ LAST_STEP=is_last_time(time_step),
338
+ num_warps=num_warps,
339
+ num_stages=4)
340
+ return q, k, v, o, L
341
+
342
+ def _lightseq_backward(do, q, k, v, o, L, sm_scale, comm_mode, backward_engine):
343
+ BLOCK = 128
344
+ q, k, v, o, do = [rearrange(_x, 'b h s d -> b s h d').contiguous() for _x in [q, k, v, o, do]]
345
+ L = rearrange(L, '(b h) s -> b h s', b=q.shape[0])
346
+
347
+ dq = torch.empty_like(q)
348
+ dk = torch.empty_like(k)
349
+ dv = torch.empty_like(v)
350
+
351
+ # maybe gqa
352
+ nqh = q.shape[2]
353
+ nkvh = k.shape[2]
354
+ is_gqa = (nqh > nkvh)
355
+
356
+ seq_rank = get_sequence_parallel_rank()
357
+ seq_world_size = get_sequence_parallel_size()
358
+
359
+ # Initialize all backward buffers
360
+ dq_delta, dk_delta, dv_delta, dk_delta_from_peer, dv_delta_from_peer, \
361
+ peer_q, peer_L, peer_k, peer_v, peer_o, peer_do = maybe_get_set_global_memory_buffer_bwd(dq, dk, dv, q, L, k, v, o, do)
362
+
363
+ for time_step in range(0, get_sequence_parallel_size() // 2 + 1):
364
+ torch.cuda.synchronize()
365
+ buffer_idx_1 = time_step % 2
366
+ buffer_idx_2 = (time_step - 1) % 2
367
+
368
+ reqs, is_update_dq, is_update_dkv = maybe_send_recv_bwd_qkvo(dq_delta[buffer_idx_1], dk_delta[buffer_idx_1], dv_delta[buffer_idx_1], dk_delta_from_peer, dv_delta_from_peer, q, peer_q[buffer_idx_1], L, peer_L[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1], o, peer_o[buffer_idx_1], do, peer_do[buffer_idx_1], time_step, comm_mode)
369
+ if comm_mode == "sync":
370
+ # if seq_rank == 0:
371
+ # print("(bwd) Immediate wait for abalation")
372
+ wait_async_handles(reqs)
373
+
374
+ if is_compute_for_local_query(time_step):
375
+ if time_step == 0:
376
+ if backward_engine == "flash":
377
+ _flash_attn_backward(do, q, k, v, o, L, dq, dk, dv, 0.0, sm_scale, True, (-1,-1), None, False)
378
+ else:
379
+ inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=xformers.ops.LowerTriangularMask(), p=0, scale=sm_scale)
380
+ op_ctx = Context(lse=L, out=o, rng_state=None)
381
+ # Let xformers dispatch the correct backend
382
+ grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None)
383
+ dq = grads.dq
384
+ dk, dv = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv)
385
+ else:
386
+ if backward_engine == "flash":
387
+ _flash_attn_backward(do, q, peer_k[buffer_idx_2], peer_v[buffer_idx_2], o, L, dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], 0.0, sm_scale, False, (-1,-1), None, False)
388
+ else:
389
+ inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], peer_k[buffer_idx_2]), value=maybe_repeat_kv_bwd(q.shape[2], peer_v[buffer_idx_2]), attn_bias=None, p=0, scale=sm_scale)
390
+ op_ctx = Context(lse=L, out=o, rng_state=None)
391
+ grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None)
392
+ dq_delta[buffer_idx_2] = grads.dq
393
+ dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv)
394
+ dq += dq_delta[buffer_idx_2]
395
+ elif is_idle(time_step):
396
+ pass
397
+ else:
398
+ if backward_engine == "flash":
399
+ _flash_attn_backward(peer_do[buffer_idx_2], peer_q[buffer_idx_2], k, v, peer_o[buffer_idx_2], peer_L[buffer_idx_2], dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], 0.0, sm_scale, False, (-1,-1), None, False)
400
+ else:
401
+ inp = Inputs(query=peer_q[buffer_idx_2], key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=None, p=0, scale=sm_scale)
402
+ op_ctx = Context(lse=peer_L[buffer_idx_2], out=peer_o[buffer_idx_2], rng_state=None)
403
+ grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=peer_do[buffer_idx_2], op=None)
404
+ dq_delta[buffer_idx_2] = grads.dq
405
+ dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv)
406
+ dk += dk_delta[buffer_idx_2]
407
+ dv += dv_delta[buffer_idx_2]
408
+
409
+ if comm_mode == "lightseq":
410
+ # Make sure tensors for next steps are ready
411
+ wait_async_handles(reqs)
412
+
413
+ # The last time step needs to send dk and dv immediately, move it up here to maximize overlap with the following three addition.
414
+ reqs, is_update_last_dkv = maybe_send_recv_bwd_last_dkv(dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], time_step, comm_mode)
415
+
416
+ if comm_mode == "sync":
417
+ # if seq_rank == 0:
418
+ # print("(bwd) dkv Immediate wait for abalation")
419
+ wait_async_handles(reqs)
420
+ # apply dq_delta, dk_delta and dv_delta from remote
421
+ if is_update_dq:
422
+ dq += dq_delta[buffer_idx_1]
423
+ if is_update_dkv:
424
+ dk += dk_delta_from_peer
425
+ dv += dv_delta_from_peer
426
+
427
+ if comm_mode == "lightseq":
428
+ wait_async_handles(reqs)
429
+ # apply dk_delta and dv_delta to sender
430
+ if is_update_last_dkv:
431
+ dk += dk_delta[buffer_idx_2]
432
+ dv += dv_delta[buffer_idx_2]
433
+
434
+ dq, dk, dv = [rearrange(_x, 'b h s d -> b s h d') for _x in [dq, dk, dv]]
435
+ return dq, dk, dv
436
+
437
+ class _attention(torch.autograd.Function):
438
+ @staticmethod
439
+ def forward(ctx, q, k, v, causal, sm_scale):
440
+ try:
441
+ global args
442
+ comm_mode = args.comm_mode
443
+ backward_engine = args.backward_engine
444
+ except:
445
+ comm_mode = 'lightseq'
446
+ backward_engine = 'flash'
447
+
448
+ q, k, v, o, L = _lightseq_forward(q, k, v, causal, sm_scale, comm_mode)
449
+
450
+ ctx.save_for_backward(q, k, v, o, L)
451
+ ctx.sm_scale = sm_scale
452
+ ctx.comm_mode = comm_mode
453
+ ctx.backward_engine = backward_engine
454
+ return o
455
+
456
+ @staticmethod
457
+ def backward(ctx, do):
458
+ q, k, v, o, L = ctx.saved_tensors
459
+ sm_scale = ctx.sm_scale
460
+
461
+ dq, dk, dv = _lightseq_backward(do, q, k, v, o, L, sm_scale, ctx.comm_mode, ctx.backward_engine)
462
+ return dq, dk, dv, None, None
463
+
464
+ attention = _attention.apply
465
+
466
+
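For orientation, a hedged sketch of how a caller might invoke the exported `attention` autograd function on its local sequence shard; the shapes below are invented for illustration and assume the sequence-parallel process group has already been set up via `initialize_distributed()`:

    import torch

    # Each rank holds only its own (batch, heads, local_seq, head_dim) slice.
    q = torch.randn(1, 32, 4096, 128, dtype=torch.bfloat16, device="cuda", requires_grad=True)
    k = torch.randn(1, 32, 4096, 128, dtype=torch.bfloat16, device="cuda", requires_grad=True)
    v = torch.randn(1, 32, 4096, 128, dtype=torch.bfloat16, device="cuda", requires_grad=True)
    out = attention(q, k, v, True, 1.0 / 128 ** 0.5)  # causal=True, sm_scale=1/sqrt(head_dim)
    out.sum().backward()  # local dq/dk/dv end up in q.grad, k.grad, v.grad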
467
+ #@pytest.mark.parametrize('causal', [False, True])
468
+ #@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)])
469
+ def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.bfloat16):
470
+ torch.manual_seed(20)
471
+ q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
472
+ k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
473
+ v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
474
+
475
+ rank = dist.get_rank()
476
+ world_size = dist.get_world_size()
477
+ seq_per_rank = N_CTX // world_size
478
+
479
+ sm_scale = 0.5
480
+ dout = torch.randn_like(q)
481
+ # reference implementation
482
+ M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
483
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
484
+ assert causal
485
+ if causal:
486
+ p[:, :, M == 0] = float("-inf")
487
+ p = torch.softmax(p.float(), dim=-1).half()
488
+ ref_out = torch.matmul(p, v)
489
+ ref_out.backward(dout)
490
+ ref_dv, v.grad = v.grad.clone(), None
491
+ ref_dk, k.grad = k.grad.clone(), None
492
+ ref_dq, q.grad = q.grad.clone(), None
493
+
494
+ # triton implementation
495
+
496
+ a, b, c, d = q.size()
497
+ real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
498
+ real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
499
+ real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
500
+ real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
501
+
502
+ tri_out = attention(real_q, real_k, real_v, causal, sm_scale).half()
503
+
504
+ # compare
505
+ assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward"
506
+ print(f" *** rank {rank} passes forward")
507
+ tri_out.backward(real_do)
508
+ tri_dv, real_v.grad = real_v.grad.clone(), None
509
+ tri_dk, real_k.grad = real_k.grad.clone(), None
510
+ tri_dq, real_q.grad = real_q.grad.clone(), None
511
+ assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq"
512
+ assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk" #f" {ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk"
513
+ assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv {ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv"
514
+ print(f"rank {rank} passes backward")
515
+
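As a possible extra sanity check on the dense reference above, recent PyTorch ships a built-in fused causal attention; the sketch below is not part of the original test and assumes PyTorch >= 2.1 (for the `scale` keyword):

    import torch
    import torch.nn.functional as F

    q_ref = torch.randn(1, 16, 2048, 128, dtype=torch.bfloat16, device="cuda")
    k_ref = torch.randn_like(q_ref)
    v_ref = torch.randn_like(q_ref)
    # Computes the same thing as the explicit tril-mask + softmax reference, fused.
    sdpa_out = F.scaled_dot_product_attention(q_ref, k_ref, v_ref, is_causal=True, scale=0.5)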
516
+
517
+ def test_gqa(Z, H, KVH, N_CTX, D_HEAD, causal, dtype=torch.bfloat16):
518
+ torch.manual_seed(177)
519
+ q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
520
+ k = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
521
+ v = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
522
+
523
+ rank = dist.get_rank()
524
+ world_size = dist.get_world_size()
525
+ seq_per_rank = N_CTX // world_size
526
+
527
+ sm_scale = 0.5
528
+ dout = torch.randn_like(q)
529
+ # torch reference implementation
530
+ M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
531
+ ref_k = maybe_repeat_kv_fwd(q.shape[1], k).clone().detach().requires_grad_(True)
532
+ ref_v = maybe_repeat_kv_fwd(q.shape[1], v).clone().detach().requires_grad_(True)
533
+ p = torch.matmul(q, ref_k.transpose(2,3)) * sm_scale
534
+ assert causal
535
+ if causal:
536
+ p[:, :, M == 0] = float("-inf")
537
+ p = torch.softmax(p.float(), dim=-1).half()
538
+ ref_out = torch.matmul(p, ref_v)
539
+ ref_out.backward(dout)
540
+ ref_dv, v.grad = ref_v.grad.clone(), None
541
+ ref_dv = (maybe_reduce_dkv(KVH, ref_dv.transpose(1,2))).transpose(1,2)
542
+ ref_dk, k.grad = ref_k.grad.clone(), None
543
+ ref_dk = (maybe_reduce_dkv(KVH, ref_dk.transpose(1,2))).transpose(1,2)
544
+ ref_dq, q.grad = q.grad.clone(), None
545
+
546
+ # flash reference
547
+ from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
548
+ flash_q = q.transpose(1,2).clone().detach().requires_grad_(True)
549
+ flash_k = k.transpose(1,2).clone().detach().requires_grad_(True)
550
+ flash_v = v.transpose(1,2).clone().detach().requires_grad_(True)
551
+ flash_ref_out = flash_attn_func(flash_q, flash_k, flash_v, 0, sm_scale, True)
552
+ flash_ref_out.backward(dout.transpose(1,2))
553
+ flash_ref_out = flash_ref_out.transpose(1,2)
554
+ flash_ref_dv, v.grad = flash_v.grad.clone(), None
555
+ flash_ref_dv = flash_ref_dv.transpose(1,2)
556
+ flash_ref_dk, k.grad = flash_k.grad.clone(), None
557
+ flash_ref_dk = flash_ref_dk.transpose(1,2)
558
+ flash_ref_dq, q.grad = flash_q.grad.clone(), None
559
+ flash_ref_dq = flash_ref_dq.transpose(1,2)
560
+
561
+ # triton implementation
562
+
563
+ a, b, c, d = q.size()
564
+ real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
565
+ real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True)
566
+ real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True)
567
+ real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
568
+
569
+ tri_out = attention(real_q, real_k, real_v, causal, sm_scale).half()
570
+
571
+ # compare
572
+ assert torch.allclose(flash_ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward against flash"
573
+ print(f" *** rank {rank} passes forward")
574
+ tri_out.backward(real_do)
575
+ tri_dv, real_v.grad = real_v.grad.clone(), None
576
+ tri_dk, real_k.grad = real_k.grad.clone(), None
577
+ tri_dq, real_q.grad = real_q.grad.clone(), None
578
+ assert torch.allclose(flash_ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq against flash"
579
+ #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape)
580
+ assert torch.allclose(flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk against flash {flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk"
581
+ assert torch.allclose(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv against flash {flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv"
582
+ print(f"rank {rank} passes backward against flash")
583
+
584
+ assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward"
585
+ print(f" *** rank {rank} passes forward")
586
+ assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq"
587
+ #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape)
588
+ assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk {ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk"
589
+ assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv {ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv"
590
+ print(f"rank {rank} passes backward")
591
+
592
+ #BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
593
+ try:
594
+ from flash_attn.flash_attn_interface import \
595
+ flash_attn_qkvpacked_func as flash_attn_func
596
+ FLASH_VER = 2
597
+ except BaseException:
598
+ try:
599
+ from flash_attn.flash_attn_interface import flash_attn_func
600
+ FLASH_VER = 1
601
+ except BaseException:
602
+ FLASH_VER = None
603
+ HAS_FLASH = FLASH_VER is not None
604
+ HAS_FLASH = None
605
+ ONLY_FLASH = False
606
+
607
+ #BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
608
+ BATCH, N_HEADS, N_CTX, D_HEAD = 1, 32, None, 128
609
+ # vary seq length for fixed head and batch=4
610
+ configs = [triton.testing.Benchmark(
611
+ x_names=['N_CTX'],
612
+ x_vals=[2**i for i in range(18, 19)],#[ 20, 21]],#[10, 11, 12, 13, 14, 15, 16, 17, 18]],
613
+ line_arg='provider',
614
+ line_vals=['triton'] if not ONLY_FLASH else [] + (['flash'] if HAS_FLASH else []),
615
+ line_names=['Triton'] if not ONLY_FLASH else [] + ([f'Flash-{FLASH_VER}'] if HAS_FLASH else []),
616
+ styles=[('red', '-'), ('blue', '-')],
617
+ ylabel='ms',
618
+ plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}-{causal}',
619
+ args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.bfloat16, 'mode': mode, 'causal': causal}
620
+ ) for mode in ["all"] for causal in [True]]
621
+
622
+ # @triton.testing.perf_report(configs)
623
+ def bench_flash_attention(BATCH, H, KVH, N_CTX, D_HEAD, causal, mode, provider, args, dtype=torch.bfloat16, device="cuda"):
624
+ assert mode == "all" #mode in ['fwd', 'bwd']
625
+ n_warmup = 10
626
+ n_repeat = 10
627
+ cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda')
628
+ seq_rank = get_sequence_parallel_rank()
629
+ seq_world_size = get_sequence_parallel_size()
630
+ if provider == "triton":
631
+ q = torch.randn((BATCH, H, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
632
+ k = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
633
+ v = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
634
+ if seq_rank == 0:
635
+ print(f"Benchmarking per GPU qkv shape: {q.shape}")
636
+ sm_scale = 1.3
637
+ fwd_fn = lambda: attention(q, k, v, causal, sm_scale)
638
+ if provider == "flash":
639
+ qkv = torch.randn((BATCH, N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True)
640
+ if FLASH_VER == 1:
641
+ lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
642
+ cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
643
+ cu_seqlens[1:] = lengths.cumsum(0)
644
+ qkv = qkv.reshape(BATCH * N_CTX, 3, H, D_HEAD)
645
+ fwd_fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal)
646
+ elif FLASH_VER == 2:
647
+ fwd_fn = lambda: flash_attn_func(qkv, causal=causal)
648
+ else:
649
+ raise ValueError(f'unknown {FLASH_VER = }')
650
+
651
+ flops_per_matmul = 2. * BATCH * H * N_CTX * N_CTX * D_HEAD / seq_world_size
652
+ attn_flops = 2 * flops_per_matmul
653
+
654
+ assert causal
655
+ if causal:
656
+ attn_flops *= 0.5
657
+ fwd_flops = attn_flops
658
+ bwd_flops = attn_flops * 2.5 # 2.0(bwd) + 0.5(recompute)
659
+
660
+ o = fwd_fn()
661
+ do = torch.randn_like(o)
662
+ bwd_fn = lambda: o.backward(do, retain_graph=True)
663
+
664
+ def run_benchmark(fn):
665
+ time_list = []
666
+ for _ in tqdm(range(n_warmup)):
667
+ cache.zero_()
668
+ fn()
669
+ torch.cuda.synchronize()
670
+ if args.debug:
671
+ print_and_reset_comm_stats()
672
+ for i in tqdm(range(n_repeat)):
673
+ cache.zero_()
674
+ torch.cuda.synchronize()
675
+ time_s = time.time()
676
+ fn()
677
+ torch.cuda.synchronize()
678
+ time_e = time.time()
679
+ time_list.append((time_e - time_s) * 1000.0)
680
+ if args.debug:
681
+ print_and_reset_comm_stats()
682
+ return np.asarray(time_list)
683
+
684
+ fwd_time_arr = run_benchmark(fwd_fn)
685
+ bwd_time_arr = run_benchmark(bwd_fn)
686
+
687
+ fwd_flops_ps = fwd_flops / np.mean(fwd_time_arr) * 1e-9
688
+ print(f"(FWD) R={seq_rank} avg: {np.mean(fwd_time_arr)}, std: {np.std(fwd_time_arr)} flops: {fwd_flops_ps} \n")
689
+
690
+ bwd_flops_ps = bwd_flops / np.mean(bwd_time_arr) * 1e-9
691
+ print(f"(BWD) R={seq_rank} avg: {np.mean(bwd_time_arr)}, std: {np.std(bwd_time_arr)} flops: {bwd_flops_ps} \n")
692
+
693
+ # total
694
+ total_time_arr = fwd_time_arr + bwd_time_arr
695
+ total_flops = fwd_flops + bwd_flops
696
+ total_flops_ps = total_flops / np.mean(total_time_arr) * 1e-9
697
+ print(f"(Total) R={seq_rank} avg: {np.mean(total_time_arr)}, std: {np.std(total_time_arr)} flops: {total_flops_ps} \n")
698
+
699
+ #return total_flops_ps
700
+
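A worked instance of the FLOP accounting used in the benchmark above may help; the configuration below is made up:

    # Illustrative numbers only.
    BATCH, H, N_CTX, D_HEAD, seq_world_size = 1, 32, 2**18, 128, 8
    flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * D_HEAD / seq_world_size
    attn_flops = 2 * flops_per_matmul      # one QK^T matmul plus one PV matmul
    attn_flops *= 0.5                      # causal masking halves the useful work
    fwd_flops = attn_flops                 # ~7.04e13 for this configuration
    bwd_flops = attn_flops * 2.5           # ~1.76e14 here: 2.0 (bwd) + 0.5 (recompute)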
701
+
702
+ if __name__ == "__main__":
703
+ parser = argparse.ArgumentParser()
704
+ parser.add_argument("--comm-mode", type=str, default="lightseq")
705
+ parser.add_argument("--debug", action="store_true")
706
+ parser.add_argument("--run-mode", type=str, default="benchmark")
707
+ parser.add_argument("--bs", type=int, default=1)
708
+ parser.add_argument("--n_heads", type=int, default=32)
709
+ parser.add_argument("--n_kvheads", type=int, default=32)
710
+ parser.add_argument("--d_head", type=int, default=128)
711
+ parser.add_argument("--start_ctx", type=int, default=12)
712
+ parser.add_argument("--end_ctx", type=int, default=18)
713
+ parser.add_argument("--forward_engine", type=str, default="triton")
714
+ parser.add_argument("--backward_engine", type=str, default="flash")
715
+
716
+ global args
717
+ args = parser.parse_args()
718
+ initialize_distributed()
719
+
720
+ assert args.forward_engine == "triton", "Only the triton forward is implemented."
721
+ assert args.backward_engine in ["flash", "xformers"], "Only flash or xformers backward is implemented."
722
+
723
+ if args.backward_engine == "flash":
724
+ from flash_attn.flash_attn_interface import _flash_attn_forward, _flash_attn_backward
725
+ else:
726
+ try:
727
+ import xformers.ops
728
+ from xformers.ops.fmha.common import Inputs, Context
729
+ from xformers.ops.fmha import _memory_efficient_attention_backward
730
+ from xformers.ops.fmha import cutlass, flash
731
+ except ImportError:
732
+ print("xformers not found! Please install it before trying to use it.")
733
+
734
+ if args.run_mode == "benchmark":
735
+ for N_CTX in [2**i for i in range(args.start_ctx, args.end_ctx)]:
736
+ bench_flash_attention(args.bs, args.n_heads, args.n_kvheads, N_CTX, args.d_head, True, "all", "triton", args)#.run(save_path='.', print_data=True)
737
+ reset_global_memory_buffer()
738
+ else:
739
+ assert args.run_mode == "test"
740
+ for N_CTX in [2048, 4096]:
741
+ test_op(1, 16, N_CTX, 128, True)
742
+ #test_gqa(1, 16, 8, N_CTX, 128, True)
743
+ reset_global_memory_buffer()
vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn_varlen.py ADDED
@@ -0,0 +1,772 @@
1
+ import os
2
+ import math
3
+
4
+ from einops import rearrange
5
+ import argparse
6
+
7
+ import pytest
8
+ import torch
9
+ import torch.distributed as dist
10
+ from torch.distributed import ReduceOp
11
+ #from torch.profiler import profile, record_function, ProfilerActivity
12
+
13
+ import triton
14
+ import triton.language as tl
15
+ import time
16
+ import numpy as np
17
+ from tqdm import tqdm
18
+
19
+ try:
20
+ from flash_attn.flash_attn_interface import _flash_attn_varlen_backward
21
+ except:
22
+ pass
23
+
24
+ from .async_communication import (is_last_time, is_compute_for_local_query, is_sync_from_remote, is_idle, print_and_reset_comm_stats,
25
+ launch_async_handles, wait_async_handles, maybe_send_recv_fwd_qkvo, maybe_send_recv_bwd_qkvo, maybe_send_recv_bwd_last_dkv, reset_global_memory_buffer,
26
+ maybe_get_set_global_memory_buffer, maybe_get_set_global_memory_buffer_bwd, initialize_distributed, get_sequence_parallel_size, get_sequence_parallel_rank)
27
+
28
+ @triton.jit
29
+ def max_fn(x, y):
30
+ return tl.math.max(x, y)
31
+
32
+ @triton.jit
33
+ def _rescale_kernel(
34
+ peer_m,
35
+ m,
36
+ peer_l,
37
+ l,
38
+ peer_o,
39
+ o,
40
+ L,
41
+ stride_oz, stride_oh, stride_om, stride_on,
42
+ Z, H, N_CTX,
43
+ seqlen_q_rounded, seqlen_peer_q_rounded,
44
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
45
+ BLOCK_N: tl.constexpr,
46
+ LAST_STEP: tl.constexpr,
47
+ ):
48
+ start_m = tl.program_id(0)
49
+ off_hz = tl.program_id(1)
50
+ o_offset = off_hz * stride_oh
51
+ peer_o_block_ptr = tl.make_block_ptr(
52
+ base=peer_o + o_offset,
53
+ shape=(N_CTX, BLOCK_DMODEL),
54
+ strides=(stride_om, stride_on),
55
+ offsets=(start_m * BLOCK_M, 0),
56
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
57
+ order=(1, 0)
58
+ )
59
+ o_block_ptr = tl.make_block_ptr(
60
+ base=o + o_offset,
61
+ shape=(N_CTX, BLOCK_DMODEL),
62
+ strides=(stride_om, stride_on),
63
+ offsets=(start_m * BLOCK_M, 0),
64
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
65
+ order=(1, 0)
66
+ )
67
+ # initialize offsets
68
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
69
+ offs_n = tl.arange(0, BLOCK_N)
70
+
71
+ peer_m_ptrs = peer_m + off_hz * seqlen_peer_q_rounded + offs_m
72
+ m_ptrs = m + off_hz * seqlen_q_rounded + offs_m
73
+ peer_l_ptrs = peer_l + off_hz * seqlen_peer_q_rounded + offs_m
74
+ l_ptrs = l + off_hz * seqlen_q_rounded + offs_m
75
+
76
+ peer_m_i = tl.load(peer_m_ptrs)
77
+ peer_m_i = peer_m_i.to(tl.float32)
78
+ m_i = tl.load(m_ptrs)
79
+ m_i = m_i.to(tl.float32)
80
+ peer_l_i = tl.load(peer_l_ptrs)
81
+ peer_l_i = peer_l_i.to(tl.float32)
82
+ l_i = tl.load(l_ptrs)
83
+ l_i = l_i.to(tl.float32)
84
+
85
+ peer_acc = tl.load(peer_o_block_ptr)#, boundary_check=(0, 1), padding_option='zero')
86
+ peer_acc = peer_acc.to(tl.float32)
87
+ acc = tl.load(o_block_ptr) #, boundary_check=(0, 1), padding_option='zero')
88
+ acc = acc.to(tl.float32)
89
+ lo = 0
90
+ hi = N_CTX
91
+ m_i_sync = tl.maximum(m_i, peer_m_i)
92
+ alpha = tl.math.exp2(m_i - m_i_sync)
93
+ peer_alpha = tl.math.exp2(peer_m_i - m_i_sync)
94
+ # -- scale and update acc --
95
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
96
+ peer_acc_scale = peer_l_i * 0 + peer_alpha # workaround some compiler bug
97
+
98
+ acc *= acc_scale[:, None]
99
+ peer_acc *= peer_acc_scale[:, None]
100
+ acc += peer_acc
101
+ l_i = l_i * acc_scale + peer_l_i * peer_acc_scale
102
+ # write back O, l, m
103
+ tl.store(m_ptrs, m_i_sync)
104
+ tl.store(l_ptrs, l_i)
105
+ if LAST_STEP:
106
+ acc = acc / l_i[:, None]
107
+ L_ptrs = L + off_hz * N_CTX + offs_m
108
+ tl.store(L_ptrs, m_i_sync / 1.44269504 + tl.math.log(l_i))
109
+ tl.store(o_block_ptr, acc.to(tl.bfloat16), boundary_check=(0, 1))
110
+
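The kernel above merges two partial online-softmax accumulators; a plain-PyTorch sketch of the same rescaling math (base-2 exponentials, matching the kernel's `exp2`) is given below with made-up names:

    import torch

    def merge_partials(m, l, o, peer_m, peer_l, peer_o):
        # m, peer_m: running row maxima; l, peer_l: running row sums;
        # o, peer_o: unnormalized partial outputs of shape (rows, head_dim).
        m_sync = torch.maximum(m, peer_m)
        alpha = torch.exp2(m - m_sync)
        peer_alpha = torch.exp2(peer_m - m_sync)
        o_merged = o * alpha[:, None] + peer_o * peer_alpha[:, None]
        l_merged = l * alpha + peer_l * peer_alpha
        return m_sync, l_merged, o_merged  # divide o_merged by l_merged on the last step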
111
+ @triton.jit
112
+ def _fwd_kernel(
113
+ Q, K, V, sm_scale,
114
+ m,
115
+ l,
116
+ O,
117
+ L,
118
+ stride_qz, stride_qh, stride_qm, stride_qk,
119
+ stride_kz, stride_kh, stride_kn, stride_kk,
120
+ stride_vz, stride_vh, stride_vk, stride_vn,
121
+ stride_oz, stride_oh, stride_om, stride_on,
122
+ Z, H, N_CTX,
123
+ seqlen_q_rounded,
124
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
125
+ BLOCK_N: tl.constexpr,
126
+ IS_CAUSAL: tl.constexpr,
127
+ LAST_STEP: tl.constexpr
128
+ ):
129
+ start_m = tl.program_id(0)
130
+ off_hz = tl.program_id(1)
131
+ qvk_offset = off_hz * stride_qh
132
+ Q_block_ptr = tl.make_block_ptr(
133
+ base=Q + qvk_offset,
134
+ shape=(N_CTX, BLOCK_DMODEL),
135
+ strides=(stride_qm, stride_qk),
136
+ offsets=(start_m * BLOCK_M, 0),
137
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
138
+ order=(1, 0)
139
+ )
140
+ K_block_ptr = tl.make_block_ptr(
141
+ base=K + qvk_offset,
142
+ shape=(BLOCK_DMODEL, N_CTX),
143
+ strides=(stride_kk, stride_kn),
144
+ offsets=(0, 0),
145
+ block_shape=(BLOCK_DMODEL, BLOCK_N),
146
+ order=(0, 1)
147
+ )
148
+ V_block_ptr = tl.make_block_ptr(
149
+ base=V + qvk_offset,
150
+ shape=(N_CTX, BLOCK_DMODEL),
151
+ strides=(stride_vk, stride_vn),
152
+ offsets=(0, 0),
153
+ block_shape=(BLOCK_N, BLOCK_DMODEL),
154
+ order=(1, 0)
155
+ )
156
+ O_block_ptr = tl.make_block_ptr(
157
+ base=O + qvk_offset,
158
+ shape=(N_CTX, BLOCK_DMODEL),
159
+ strides=(stride_om, stride_on),
160
+ offsets=(start_m * BLOCK_M, 0),
161
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
162
+ order=(1, 0)
163
+ )
164
+ # initialize offsets
165
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
166
+ offs_n = tl.arange(0, BLOCK_N)
167
+ # initialize pointer to m and l -> load from provided pointer
168
+ # (TODO): Why float32?
169
+ m_ptrs = m + off_hz * seqlen_q_rounded + offs_m
170
+ l_ptrs = l + off_hz * seqlen_q_rounded + offs_m
171
+ m_i = tl.load(m_ptrs)
172
+ m_i = m_i.to(tl.float32)
173
+ l_i = tl.load(l_ptrs)
174
+ l_i = l_i.to(tl.float32)
175
+ acc = tl.load(O_block_ptr)
176
+ acc = acc.to(tl.float32)
177
+ # scale sm_scale by log_2(e) and use
178
+ # 2^x instead of exp in the loop because CSE and LICM
179
+ # don't work as expected with `exp` in the loop
180
+ qk_scale = sm_scale * 1.44269504
181
+ # load q: it will stay in SRAM throughout
182
+ q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero')
183
+ q = (q * qk_scale).to(tl.bfloat16)
184
+ # loop over k, v and update accumulator
185
+ lo = 0
186
+ hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX
187
+ for start_n in range(lo, hi, BLOCK_N):
188
+ # -- load k, v --
189
+ k = tl.load(K_block_ptr, boundary_check=(1,), padding_option='zero')
190
+ v = tl.load(V_block_ptr, boundary_check=(0,), padding_option='zero')
191
+ # -- compute qk ---
192
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
193
+ if IS_CAUSAL:
194
+ qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf"))
195
+ qk += tl.dot(q, k)
196
+ # -- compute scaling constant ---
197
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
198
+ alpha = tl.math.exp2(m_i - m_i_new)
199
+ p = tl.math.exp2(qk - m_i_new[:, None])
200
+ # -- scale and update acc --
201
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
202
+ acc *= acc_scale[:, None]
203
+ acc += tl.dot(p.to(tl.bfloat16), v)
204
+ # -- update m_i and l_i --
205
+ l_i = l_i * alpha + tl.sum(p, 1)
206
+ m_i = m_i_new
207
+ # update pointers
208
+ K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
209
+ V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
210
+ # write back original l and m
211
+ tl.store(m_ptrs, m_i)
212
+ tl.store(l_ptrs, l_i)
213
+ # write back O, L
214
+ if LAST_STEP:
215
+ acc = acc / l_i[:, None]
216
+ L_ptrs = L + off_hz * seqlen_q_rounded + offs_m
217
+ tl.store(L_ptrs, m_i / 1.44269504 + tl.math.log(l_i))
218
+ tl.store(O_block_ptr, acc.to(tl.bfloat16), boundary_check=(0, 1))
219
+
220
+ # for gqa/mqa to expand kv heads
221
+ def maybe_repeat_kv_fwd(nqh, kv):
222
+ bs, nkvh, slen, hdim = kv.shape
223
+ n_rep = nqh // nkvh
224
+ if n_rep == 1:
225
+ return kv
226
+ kv_expand = kv[:, :, None, :, :].expand(bs, nkvh, n_rep, slen, hdim)
227
+ return kv_expand.reshape(bs, nkvh * n_rep, slen, hdim)
228
+
229
+ def maybe_repeat_kv_bwd(nqh, kv):
230
+ bs, slen, nkvh, hdim = kv.shape
231
+ n_rep = nqh // nkvh
232
+ if n_rep == 1:
233
+ return kv
234
+ kv_expand = kv[:, :, :, None, :].expand(bs, slen, nkvh, n_rep, hdim)
235
+ return kv_expand.reshape(bs, slen, nkvh * n_rep, hdim)
236
+
237
+ # kv grad has shape bs, slen, nqh, hdim
238
+ def maybe_reduce_dkv(nkvh, dkv):
239
+ bs, slen, nqh, hdim = dkv.shape
240
+ n_rep = nqh // nkvh
241
+ if n_rep == 1:
242
+ return dkv
243
+ #print("*"*100, dkv.shape, bs, slen, nkvh, n_rep, hdim)
244
+ dkv_reshape = dkv.view(bs, slen, nkvh, n_rep, hdim)
245
+ #print("-"*100, dkv_reshape.shape, bs, slen, nkvh, n_rep, hdim)
246
+ return torch.sum(dkv_reshape, dim=3)
247
+
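A small shape check may clarify how the repeat/reduce helpers pair up for GQA: KV heads are expanded for the forward, and the gradient is summed over the replicated query heads in the backward; the sizes are invented:

    import torch

    bs, slen, nkvh, n_rep, hdim = 2, 16, 4, 8, 64
    kv = torch.randn(bs, nkvh, slen, hdim)               # forward layout (b, kv_heads, s, d)
    expanded = maybe_repeat_kv_fwd(nkvh * n_rep, kv)     # -> (b, kv_heads * n_rep, s, d)
    grad = torch.randn(bs, slen, nkvh * n_rep, hdim)     # backward layout (b, s, q_heads, d)
    reduced = maybe_reduce_dkv(nkvh, grad)               # -> (b, s, kv_heads, d)
    assert expanded.shape == (bs, nkvh * n_rep, slen, hdim)
    assert reduced.shape == (bs, slen, nkvh, hdim)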
248
+
249
+ def _lightseq_forward_varlen(q, k, v, causal, sm_scale, comm_mode):
250
+ # maybe_contiguous = lambda x: x.contiguous() if x.stride(-1) != 1 else x
251
+ # q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
252
+
253
+ # shape constraints
254
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
255
+ # assert Lq == Lk and Lk == Lv
256
+ # assert Lk in {16, 32, 64, 128}
257
+ BLOCK_M = 128
258
+ BLOCK_N = 64
259
+
260
+ bsz, nh, unpadded_seq_len, hdim = q.shape
261
+ cu_seq_lens = torch.arange(0, (bsz+1) * unpadded_seq_len, unpadded_seq_len, dtype=torch.int32, device=q.device)
262
+ max_seqlen = unpadded_seq_len
263
+ seqlen_q_rounded = math.ceil(q.shape[2] / BLOCK_M) * BLOCK_M
264
+
265
+ m = torch.full((bsz * nh, seqlen_q_rounded), fill_value=-float("inf"), device=q.device, dtype=torch.float32)
266
+ l = torch.zeros((bsz * nh, seqlen_q_rounded), device=q.device, dtype=torch.float32)
267
+ L = torch.zeros((bsz * nh, seqlen_q_rounded), device=q.device, dtype=torch.float32)
268
+ o = torch.zeros_like(q)
269
+
270
+ grid = (triton.cdiv(q.shape[2], BLOCK_M), bsz * nh, 1)
271
+ num_warps = 4 if Lk <= 64 else 8
272
+
273
+ seq_rank = get_sequence_parallel_rank()
274
+ seq_world_size = get_sequence_parallel_size()
275
+
276
+ # Initialize all buffers
277
+ peer_q, peer_k, peer_v, peer_m, peer_l, peer_o = maybe_get_set_global_memory_buffer(q, k, v, m, l, o)
278
+
279
+ fwd_launch_helper = lambda q, k, v, m, l, o, L, IS_CAUSAL, LAST_STEP: _fwd_kernel[grid](
280
+ q, k, v, sm_scale,
281
+ m,
282
+ l,
283
+ o,
284
+ L,
285
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
286
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
287
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
288
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
289
+ q.shape[0], q.shape[1], q.shape[2],
290
+ seqlen_q_rounded,
291
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
292
+ IS_CAUSAL=IS_CAUSAL,
293
+ LAST_STEP=LAST_STEP,
294
+ num_warps=num_warps,
295
+ num_stages=4)
296
+
297
+ for time_step in range(seq_world_size // 2 + 1):
298
+ # This is important so that the CUDA scheduler executes the NCCL calls first.
299
+ torch.cuda.synchronize()
300
+ # Communication uses buffer_idx_1, and compute uses buffer_idx_2, which effectively holds the contents from the previous time step.
301
+ buffer_idx_1 = time_step % 2
302
+ buffer_idx_2 = (time_step - 1) % 2
303
+
304
+ reqs = maybe_send_recv_fwd_qkvo(q, peer_q[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1],
305
+ [peer_o[buffer_idx_1], peer_m[buffer_idx_1], peer_l[buffer_idx_1]], time_step, comm_mode)
306
+ if comm_mode == "sync":
307
+ # if seq_rank == 0:
308
+ # print("Immediate wait for abalation")
309
+ wait_async_handles(reqs)
310
+ if is_compute_for_local_query(time_step):
311
+ # print(f"t={time_step}: (Comp) R={seq_rank} local compute")
312
+ if time_step == 0:
313
+ fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), m, l, o, L, True, is_last_time(time_step))
314
+ else:
315
+ # if needs to sync from others, do not normalize here
316
+ fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], peer_k[buffer_idx_2]), maybe_repeat_kv_fwd(q.shape[1], peer_v[buffer_idx_2]), m, l, o, L, False, not is_sync_from_remote(time_step) and is_last_time(time_step))
317
+ elif is_idle(time_step):
318
+ # print(f"t={time_step}: (Comp) R={seq_rank} idle")
319
+ pass
320
+ else:
321
+ # print(f"t={time_step}: (Comp) R={seq_rank} helps other")
322
+ peer_m[buffer_idx_2] = torch.full_like(m, fill_value=-float("inf"))
323
+ peer_l[buffer_idx_2] = torch.zeros_like(l)
324
+ peer_o[buffer_idx_2] = torch.zeros_like(o)
325
+
326
+ #print(f"rank 3 q is: {peer_q[buffer_idx_2]}")
327
+ fwd_launch_helper(peer_q[buffer_idx_2], maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), peer_m[buffer_idx_2], peer_l[buffer_idx_2], peer_o[buffer_idx_2], None, False, False)
328
+
329
+ if comm_mode == "lightseq":
330
+ # Make sure tensors for next steps are ready
331
+ wait_async_handles(reqs)
332
+ # sync between statistics get from other ranks and the local ones
333
+ if is_sync_from_remote(time_step):
334
+ # print(f"t={time_step}: (Comp) R={seq_rank} sync with other - last time: {is_last_time(time_step)}")
335
+ seqlen_peer_q_rounded = peer_l[buffer_idx_1].shape[-1]
336
+ _rescale_kernel[grid](
337
+ peer_m[buffer_idx_1],
338
+ m,
339
+ peer_l[buffer_idx_1],
340
+ l,
341
+ peer_o[buffer_idx_1],
342
+ o,
343
+ L,
344
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
345
+ o.shape[0], o.shape[1], o.shape[2],
346
+ seqlen_q_rounded, seqlen_peer_q_rounded,
347
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
348
+ LAST_STEP=is_last_time(time_step),
349
+ num_warps=num_warps,
350
+ num_stages=4)
351
+ return q, k, v, o, L, cu_seq_lens, max_seqlen
352
+
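Since the varlen path treats the local shard as a packed batch of equal-length sequences, a tiny example of the cumulative-length vector built above may help; the sizes are illustrative:

    import torch

    bsz, unpadded_seq_len = 2, 4
    cu_seq_lens = torch.arange(0, (bsz + 1) * unpadded_seq_len, unpadded_seq_len, dtype=torch.int32)
    # tensor([0, 4, 8], dtype=torch.int32): sequence i occupies packed rows
    # cu_seq_lens[i]:cu_seq_lens[i+1], and max_seqlen is the per-rank length (4 here).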
353
+ def _lightseq_backward_varlen(do, q, k, v, o, L, sm_scale, comm_mode, backward_engine, cu_seq_lens, max_seqlen):
354
+ BLOCK = 128
355
+ L = rearrange(L[:, :max_seqlen].contiguous(), '(b h) s -> b h s', b=q.shape[0])
356
+ q, k, v, o, do = [rearrange(_x, 'b h s d -> (b s) h d').contiguous() for _x in [q, k, v, o, do]]
357
+
358
+ dq = torch.empty_like(q)
359
+ dk = torch.empty_like(k)
360
+ dv = torch.empty_like(v)
361
+
362
+ # maybe gqa
363
+ nqh = q.shape[1]
364
+ nkvh = k.shape[1]
365
+ is_gqa = (nqh > nkvh)
366
+
367
+ seq_rank = get_sequence_parallel_rank()
368
+ seq_world_size = get_sequence_parallel_size()
369
+
370
+ # Initialize all backward buffers
371
+ dq_delta, dk_delta, dv_delta, dk_delta_from_peer, dv_delta_from_peer, \
372
+ peer_q, peer_L, peer_k, peer_v, peer_o, peer_do = maybe_get_set_global_memory_buffer_bwd(dq, dk, dv, q, L, k, v, o, do)
373
+
374
+ for time_step in range(0, get_sequence_parallel_size() // 2 + 1):
375
+ torch.cuda.synchronize()
376
+ buffer_idx_1 = time_step % 2
377
+ buffer_idx_2 = (time_step - 1) % 2
378
+
379
+ reqs, is_update_dq, is_update_dkv = maybe_send_recv_bwd_qkvo(dq_delta[buffer_idx_1], dk_delta[buffer_idx_1], dv_delta[buffer_idx_1], dk_delta_from_peer, dv_delta_from_peer, q, peer_q[buffer_idx_1], L, peer_L[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1], o, peer_o[buffer_idx_1], do, peer_do[buffer_idx_1], time_step, comm_mode)
380
+ if comm_mode == "sync":
381
+ wait_async_handles(reqs)
382
+
383
+ if is_compute_for_local_query(time_step):
384
+ if time_step == 0:
385
+ assert backward_engine == "flash", "We haven't supported the varlen feature in xformers yet"
386
+ if backward_engine == "flash":
387
+ _flash_attn_varlen_backward(do, q, k, v, o, L, dq, dk, dv, cu_seq_lens, cu_seq_lens, max_seqlen, max_seqlen, 0.0, sm_scale, True, None)
388
+ else:
389
+ inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=xformers.ops.LowerTriangularMask(), p=0, scale=sm_scale)
390
+ op_ctx = Context(lse=L, out=o, rng_state=None)
391
+ # Let xformers dispatch the correct backend
392
+ grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None)
393
+ dq = grads.dq
394
+ dk, dv = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv)
395
+ else:
396
+ assert backward_engine == "flash", "We haven't supported the varlen feature in xformers yet"
397
+ if backward_engine == "flash":
398
+ _flash_attn_varlen_backward(do, q, peer_k[buffer_idx_2], peer_v[buffer_idx_2], o, L, dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], cu_seq_lens, cu_seq_lens, max_seqlen, max_seqlen, 0.0, sm_scale, False, None)
399
+ else:
400
+ inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], peer_k[buffer_idx_2]), value=maybe_repeat_kv_bwd(q.shape[2], peer_v[buffer_idx_2]), attn_bias=None, p=0, scale=sm_scale)
401
+ op_ctx = Context(lse=L, out=o, rng_state=None)
402
+ grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None)
403
+ dq_delta[buffer_idx_2] = grads.dq
404
+ dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv)
405
+ dq += dq_delta[buffer_idx_2]
406
+ elif is_idle(time_step):
407
+ # print(f"BWD t={time_step}: (Comp) R={seq_rank} idle")
408
+ pass
409
+ else:
410
+ # print(f"BWD t={time_step}: (Comp) R={seq_rank} helps other")
411
+ assert backward_engine == "flash", "We haven't supported the varlen feature in xformers yet"
412
+ if backward_engine == "flash":
413
+ _flash_attn_varlen_backward(peer_do[buffer_idx_2], peer_q[buffer_idx_2], k, v, peer_o[buffer_idx_2], peer_L[buffer_idx_2], dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], cu_seq_lens, cu_seq_lens, max_seqlen, max_seqlen, 0.0, sm_scale, False, None)
414
+ else:
415
+ inp = Inputs(query=peer_q[buffer_idx_2], key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=None, p=0, scale=sm_scale)
416
+ op_ctx = Context(lse=peer_L[buffer_idx_2], out=peer_o[buffer_idx_2], rng_state=None)
417
+ grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=peer_do[buffer_idx_2], op=None)
418
+ dq_delta[buffer_idx_2] = grads.dq
419
+ dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv)
420
+ dk += dk_delta[buffer_idx_2]
421
+ dv += dv_delta[buffer_idx_2]
422
+
423
+ if comm_mode == "lightseq":
424
+ # Make sure tensors for next steps are ready
425
+ wait_async_handles(reqs)
426
+
427
+ # The last time step needs to send dk and dv immediately; move it up here to maximize overlap with the following three additions.
428
+ reqs, is_update_last_dkv = maybe_send_recv_bwd_last_dkv(dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], time_step, comm_mode)
429
+
430
+ if comm_mode == "sync":
431
+ # if seq_rank == 0:
432
+ # print("(bwd) dkv Immediate wait for abalation")
433
+ wait_async_handles(reqs)
434
+ # apply dq_delta, dk_delta and dv_delta from remote
435
+ if is_update_dq:
436
+ dq += dq_delta[buffer_idx_1]
437
+ if is_update_dkv:
438
+ dk += dk_delta_from_peer
439
+ dv += dv_delta_from_peer
440
+
441
+ if comm_mode == "lightseq":
442
+ wait_async_handles(reqs)
443
+ # apply dk_delta and dv_delta to sender
444
+ if is_update_last_dkv:
445
+ dk += dk_delta[buffer_idx_2]
446
+ dv += dv_delta[buffer_idx_2]
447
+
448
+ dq, dk, dv = [rearrange(_x, '(b s) h d -> b h s d', s=max_seqlen) for _x in [dq, dk, dv]]
449
+ return dq, dk, dv
450
+
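The backward above repacks tensors from (b, h, s, d) into the flat (b*s, h, d) layout expected by the varlen flash-attention kernels and unpacks the gradients at the end; a round-trip sketch with made-up sizes:

    import torch
    from einops import rearrange

    x = torch.randn(2, 8, 16, 64)                         # (b, h, s, d)
    packed = rearrange(x, 'b h s d -> (b s) h d')         # varlen layout (b*s, h, d)
    unpacked = rearrange(packed, '(b s) h d -> b h s d', s=16)
    assert torch.equal(x, unpacked)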
451
+ class _attention_varlen(torch.autograd.Function):
452
+ @staticmethod
453
+ def forward(ctx, q, k, v, causal, sm_scale):
454
+ try:
455
+ global args
456
+ comm_mode = args.comm_mode
457
+ backward_engine = args.backward_engine
458
+ except:
459
+ comm_mode = 'lightseq'
460
+ backward_engine = 'flash'
461
+
462
+ q, k, v, o, L, cu_seq_lens, max_seqlen = _lightseq_forward_varlen(q, k, v, causal, sm_scale, comm_mode)
463
+
464
+ ctx.save_for_backward(q, k, v, o, L, cu_seq_lens)
465
+ ctx.max_seqlen = max_seqlen
466
+ ctx.sm_scale = sm_scale
467
+ ctx.comm_mode = comm_mode
468
+ ctx.backward_engine = backward_engine
469
+ return o
470
+
471
+ @staticmethod
472
+ def backward(ctx, do):
473
+ q, k, v, o, L, cu_seq_lens = ctx.saved_tensors
474
+ sm_scale = ctx.sm_scale
475
+ max_seqlen = ctx.max_seqlen
476
+
477
+ dq, dk, dv = _lightseq_backward_varlen(do, q, k, v, o, L, sm_scale, ctx.comm_mode, ctx.backward_engine, cu_seq_lens, max_seqlen)
478
+ return dq, dk, dv, None, None
479
+
480
+ dist_attn_varlen = _attention_varlen.apply
481
+
482
+
483
+ #@pytest.mark.parametrize('causal', [False, True])
484
+ #@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)])
485
+ def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.bfloat16):
486
+ torch.manual_seed(20)
487
+ rank = dist.get_rank()
488
+ world_size = dist.get_world_size()
489
+
490
+
491
+ PAD = world_size * 256
492
+ seq_per_rank = (N_CTX-PAD) // world_size
493
+ q = torch.empty((Z, H, N_CTX-PAD, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
494
+ k = torch.empty((Z, H, N_CTX-PAD, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
495
+ v = torch.empty((Z, H, N_CTX-PAD, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
496
+
497
+ # DEBUG: mask out
498
+ #mask = torch.zeros(Z, H, seq_per_rank * (world_size - 1), D_HEAD).cuda()
499
+ #mask_2 = torch.ones(Z, H, seq_per_rank, D_HEAD).cuda()
500
+ #mask = torch.cat((mask, mask_2), dim=-2).to(dtype)
501
+ #q = mask * q
502
+ #k = mask * k
503
+ #v = mask * v
504
+
505
+ sm_scale = 0.5
506
+ dout = torch.randn_like(q)
507
+ # reference implementation
508
+ M = torch.tril(torch.ones((N_CTX-PAD, N_CTX-PAD), device="cuda"))
509
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
510
+ assert causal
511
+ if causal:
512
+ p[:, :, M == 0] = float("-inf")
513
+ p = torch.softmax(p.float(), dim=-1).half()
514
+ ref_out = torch.matmul(p, v)
515
+ ref_out.backward(dout)
516
+ ref_dv, v.grad = v.grad.clone(), None
517
+ ref_dk, k.grad = k.grad.clone(), None
518
+ ref_dq, q.grad = q.grad.clone(), None
519
+
520
+ # triton implementation
521
+
522
+ a, b, c, d = q.size()
523
+ real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
524
+ real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
525
+ real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
526
+ real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
527
+
528
+ tri_out = dist_attn_varlen(real_q, real_k, real_v, causal, sm_scale).half()
529
+
530
+ # compare
531
+ assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward"
532
+ print(f" *** rank {rank} passes forward")
533
+ tri_out.backward(real_do)
534
+ tri_dv, real_v.grad = real_v.grad.clone(), None
535
+ tri_dk, real_k.grad = real_k.grad.clone(), None
536
+ tri_dq, real_q.grad = real_q.grad.clone(), None
537
+ assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f"rank {rank} fails backward dq" #{ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dq} {torch.max(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dq)} rank {rank} fails backward dk"
538
+ assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk" #{ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk"
539
+ assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv" #{ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv"
540
+ print(f"rank {rank} passes backward")
541
+
542
+ # TODO(High Priority): Investigate why rank 0 tends to have larger numerical differences.
543
+ def test_gqa(Z, H, KVH, N_CTX, D_HEAD, causal, dtype=torch.bfloat16):
544
+ torch.manual_seed(177)
545
+ q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
546
+ k = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
547
+ v = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
548
+
549
+ rank = dist.get_rank()
550
+ world_size = dist.get_world_size()
551
+ seq_per_rank = N_CTX // world_size
552
+
553
+ sm_scale = 0.5
554
+ dout = torch.randn_like(q)
555
+ # torch reference implementation
556
+ M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
557
+ ref_k = maybe_repeat_kv_fwd(q.shape[1], k).clone().detach().requires_grad_(True)
558
+ ref_v = maybe_repeat_kv_fwd(q.shape[1], v).clone().detach().requires_grad_(True)
559
+ #print(q.shape, ref_k.shape, k.shape)
560
+ p = torch.matmul(q, ref_k.transpose(2,3)) * sm_scale
561
+ assert causal
562
+ if causal:
563
+ p[:, :, M == 0] = float("-inf")
564
+ p = torch.softmax(p.float(), dim=-1).half()
565
+ ref_out = torch.matmul(p, ref_v)
566
+ ref_out.backward(dout)
567
+ ref_dv, v.grad = ref_v.grad.clone(), None
568
+ #print("Before reduce", ref_dv.shape)
569
+ ref_dv = (maybe_reduce_dkv(KVH, ref_dv.transpose(1,2))).transpose(1,2)
570
+ #print("After reduce", ref_dv.shape)
571
+ ref_dk, k.grad = ref_k.grad.clone(), None
572
+ ref_dk = (maybe_reduce_dkv(KVH, ref_dk.transpose(1,2))).transpose(1,2)
573
+ ref_dq, q.grad = q.grad.clone(), None
574
+
575
+ # flash reference
576
+ from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
577
+ flash_q = q.transpose(1,2).clone().detach().requires_grad_(True)
578
+ flash_k = k.transpose(1,2).clone().detach().requires_grad_(True)
579
+ flash_v = v.transpose(1,2).clone().detach().requires_grad_(True)
580
+ flash_ref_out = flash_attn_func(flash_q, flash_k, flash_v, 0, sm_scale, True)
581
+ flash_ref_out.backward(dout.transpose(1,2))
582
+ flash_ref_out = flash_ref_out.transpose(1,2)
583
+ flash_ref_dv, v.grad = flash_v.grad.clone(), None
584
+ flash_ref_dv = flash_ref_dv.transpose(1,2)
585
+ flash_ref_dk, k.grad = flash_k.grad.clone(), None
586
+ flash_ref_dk = flash_ref_dk.transpose(1,2)
587
+ flash_ref_dq, q.grad = flash_q.grad.clone(), None
588
+ flash_ref_dq = flash_ref_dq.transpose(1,2)
589
+
590
+ # triton implementation
591
+
592
+ a, b, c, d = q.size()
593
+ real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
594
+ real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True)
595
+ real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True)
596
+ real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True)
597
+
598
+ tri_out = dist_attn_varlen(real_q, real_k, real_v, causal, sm_scale).half()
599
+
600
+ # compare
601
+ assert torch.allclose(flash_ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward against flash"
602
+ print(f" *** rank {rank} passes forward")
603
+ tri_out.backward(real_do)
604
+ tri_dv, real_v.grad = real_v.grad.clone(), None
605
+ tri_dk, real_k.grad = real_k.grad.clone(), None
606
+ tri_dq, real_q.grad = real_q.grad.clone(), None
607
+ assert torch.allclose(flash_ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq against flash"
608
+ #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape)
609
+ assert torch.allclose(flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk against flash {flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk"
610
+ assert torch.allclose(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv against flash {flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv"
611
+ print(f"rank {rank} passes backward against flash")
612
+
613
+ assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward"
614
+ print(f" *** rank {rank} passes forward")
615
+ assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq"
616
+ #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape)
617
+ assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk {ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk"
618
+ assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv {ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv"
619
+ print(f"rank {rank} passes backward")
620
+
621
+ #BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
622
+ try:
623
+ from flash_attn.flash_attn_interface import \
624
+ flash_attn_qkvpacked_func as flash_attn_func
625
+ FLASH_VER = 2
626
+ except BaseException:
627
+ try:
628
+ from flash_attn.flash_attn_interface import flash_attn_func
629
+ FLASH_VER = 1
630
+ except BaseException:
631
+ FLASH_VER = None
632
+ HAS_FLASH = FLASH_VER is not None
633
+ HAS_FLASH = None
634
+ ONLY_FLASH = False
635
+
636
+ #BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
637
+ BATCH, N_HEADS, N_CTX, D_HEAD = 1, 32, None, 128
638
+ # vary seq length for fixed head and batch=4
639
+ configs = [triton.testing.Benchmark(
640
+ x_names=['N_CTX'],
641
+ x_vals=[2**i for i in range(18, 19)],#[ 20, 21]],#[10, 11, 12, 13, 14, 15, 16, 17, 18]],
642
+ line_arg='provider',
643
+ line_vals=['triton'] if not ONLY_FLASH else [] + (['flash'] if HAS_FLASH else []),
644
+ line_names=['Triton'] if not ONLY_FLASH else [] + ([f'Flash-{FLASH_VER}'] if HAS_FLASH else []),
645
+ styles=[('red', '-'), ('blue', '-')],
646
+ ylabel='ms',
647
+ plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}-{causal}',
648
+ args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.bfloat16, 'mode': mode, 'causal': causal}
649
+ ) for mode in ["all"] for causal in [True]]
650
+
651
+ # @triton.testing.perf_report(configs)
652
+ def bench_flash_attention(BATCH, H, KVH, N_CTX, D_HEAD, causal, mode, provider, args, dtype=torch.bfloat16, device="cuda"):
653
+ assert mode == "all" #mode in ['fwd', 'bwd']
654
+ n_warmup = 10
655
+ n_repeat = 10
656
+ cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda')
657
+ seq_rank = get_sequence_parallel_rank()
658
+ seq_world_size = get_sequence_parallel_size()
659
+ if provider == "triton":
660
+ q = torch.randn((BATCH, H, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
661
+ k = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
662
+ v = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
663
+ if seq_rank == 0:
664
+ print(f"Benchmarking per GPU qkv shape: {q.shape}")
665
+ sm_scale = 1.3
666
+ fwd_fn = lambda: dist_attn_varlen(q, k, v, causal, sm_scale)
667
+ if provider == "flash":
668
+ qkv = torch.randn((BATCH, N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True)
669
+ if FLASH_VER == 1:
670
+ lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
671
+ cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
672
+ cu_seqlens[1:] = lengths.cumsum(0)
673
+ qkv = qkv.reshape(BATCH * N_CTX, 3, H, D_HEAD)
674
+ fwd_fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal)
675
+ elif FLASH_VER == 2:
676
+ fwd_fn = lambda: flash_attn_func(qkv, causal=causal)
677
+ else:
678
+ raise ValueError(f'unknown {FLASH_VER = }')
679
+
680
+ flops_per_matmul = 2. * BATCH * H * N_CTX * N_CTX * D_HEAD / seq_world_size
681
+ attn_flops = 2 * flops_per_matmul
682
+
683
+ assert causal
684
+ if causal:
685
+ attn_flops *= 0.5
686
+ fwd_flops = attn_flops
687
+ bwd_flops = attn_flops * 2.5 # 2.0(bwd) + 0.5(recompute)
688
+
689
+ o = fwd_fn()
690
+ do = torch.randn_like(o)
691
+ bwd_fn = lambda: o.backward(do, retain_graph=True)
692
+
693
+ def run_benchmark(fn):
694
+ time_list = []
695
+ for _ in tqdm(range(n_warmup)):
696
+ cache.zero_()
697
+ fn()
698
+ torch.cuda.synchronize()
699
+ if args.debug:
700
+ print_and_reset_comm_stats()
701
+ for i in tqdm(range(n_repeat)):
702
+ cache.zero_()
703
+ torch.cuda.synchronize()
704
+ time_s = time.time()
705
+ fn()
706
+ torch.cuda.synchronize()
707
+ time_e = time.time()
708
+ time_list.append((time_e - time_s) * 1000.0)
709
+ if args.debug:
710
+ print_and_reset_comm_stats()
711
+ return np.asarray(time_list)
712
+
713
+ fwd_time_arr = run_benchmark(fwd_fn)
714
+ bwd_time_arr = run_benchmark(bwd_fn)
715
+
716
+ fwd_flops_ps = fwd_flops / np.mean(fwd_time_arr) * 1e-9
717
+ print(f"(FWD) R={seq_rank} avg: {np.mean(fwd_time_arr)}, std: {np.std(fwd_time_arr)} flops: {fwd_flops_ps} \n")
718
+
719
+ bwd_flops_ps = bwd_flops / np.mean(bwd_time_arr) * 1e-9
720
+ print(f"(BWD) R={seq_rank} avg: {np.mean(bwd_time_arr)}, std: {np.std(bwd_time_arr)} flops: {bwd_flops_ps} \n")
721
+
722
+ # total
723
+ total_time_arr = fwd_time_arr + bwd_time_arr
724
+ total_flops = fwd_flops + bwd_flops
725
+ total_flops_ps = total_flops / np.mean(total_time_arr) * 1e-9
726
+ print(f"(Total) R={seq_rank} avg: {np.mean(total_time_arr)}, std: {np.std(total_time_arr)} flops: {total_flops_ps} \n")
727
+
728
+ #return total_flops_ps
729
+
730
+
731
+ if __name__ == "__main__":
732
+ parser = argparse.ArgumentParser()
733
+ parser.add_argument("--comm-mode", type=str, default="lightseq")
734
+ parser.add_argument("--debug", action="store_true")
735
+ parser.add_argument("--run-mode", type=str, default="test")
736
+ parser.add_argument("--bs", type=int, default=1)
737
+ parser.add_argument("--n_heads", type=int, default=32)
738
+ parser.add_argument("--n_kvheads", type=int, default=32)
739
+ parser.add_argument("--d_head", type=int, default=128)
740
+ parser.add_argument("--start_ctx", type=int, default=12)
741
+ parser.add_argument("--end_ctx", type=int, default=18)
742
+ parser.add_argument("--forward_engine", type=str, default="triton")
743
+ parser.add_argument("--backward_engine", type=str, default="flash")
744
+
745
+ global args
746
+ args = parser.parse_args()
747
+ initialize_distributed()
748
+
749
+ assert args.forward_engine == "triton", "Only the triton forward is implemented."
750
+ assert args.backward_engine in ["flash", "xformers"], "Only flash or xformers backward is implemented."
751
+
752
+ if args.backward_engine == "flash":
753
+ from flash_attn.flash_attn_interface import _flash_attn_forward, _flash_attn_backward
754
+ else:
755
+ try:
756
+ import xformers.ops
757
+ from xformers.ops.fmha.common import Inputs, Context
758
+ from xformers.ops.fmha import _memory_efficient_attention_backward
759
+ from xformers.ops.fmha import cutlass, flash
760
+ except ImportError:
761
+ print("xformers not found! Please install it before trying to use it.")
762
+
763
+ if args.run_mode == "benchmark":
764
+ for N_CTX in [2**i for i in range(args.start_ctx, args.end_ctx)]:
765
+ bench_flash_attention(args.bs, args.n_heads, args.n_kvheads, N_CTX, args.d_head, True, "all", "triton", args)#.run(save_path='.', print_data=True)
766
+ reset_global_memory_buffer()
767
+ else:
768
+ assert args.run_mode == "test"
769
+ for N_CTX in [4096]:
770
+ test_op(2, 16, N_CTX, 128, True)
771
+ #test_gqa(1, 16, 8, N_CTX, 128, True)
772
+ reset_global_memory_buffer()
vision_niah_d/easy_context/dist_flash_attn/monkey_patch.py ADDED
@@ -0,0 +1,609 @@
1
+ """
2
+ Materialization-aware gradient checkpointing monkey patch.
3
+ """
4
+ from typing import List, Optional, Tuple
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.utils.checkpoint import _get_autocast_kwargs, check_backward_validity, get_device_states, set_device_states, detach_variable
9
+
10
+ import transformers
11
+ from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, BaseModelOutputWithPast
12
+
13
+ from einops import rearrange
14
+
15
+ from .lightseq_async_attn import _lightseq_forward, _lightseq_backward
16
+ from .async_communication import initialize_distributed, reset_global_memory_buffer
17
+
18
+
19
+ # define a global buffer to save flash attention outputs
20
+ # it's called global because it saves the outputs for all layers
21
+ global_flash_attn_out_buffer = None
22
+
23
+ # define a local buffer to save recomputed qkv
24
+ # it's called local because it's a temporary buffer which will be updated across layers
25
+ local_res_grad_buffer = None
26
+
27
+ # hooks for the gradients of residual
28
+ global_hooks = []
29
+
30
+ def init_flash_attn_buffers(num_layers):
31
+ # update the global buffer according to number of layers
32
+ global global_flash_attn_out_buffer
33
+ global_flash_attn_out_buffer = [None] * num_layers
34
+
35
+ def clean_hook():
36
+ # Remove all hooks in the global buffer
37
+ for hook in global_hooks:
38
+ hook.remove()
39
+ # Clear the global buffer
40
+ global_hooks.clear()
41
+
42
+ def clear_all_buffers_at_the_end_of_training():
43
+ # call it at the end of training
44
+ global global_flash_attn_out_buffer
45
+ global_flash_attn_out_buffer = None
46
+ global local_res_grad_buffer
47
+ local_res_grad_buffer = None
48
+ clean_hook()
49
+
50
+ def save_flash_attn_out_to_global_buffer(idx, out):
51
+ global global_flash_attn_out_buffer
52
+ global_flash_attn_out_buffer[idx] = out
53
+
54
+ def get_flash_attn_out_from_global_buffer(idx):
55
+ global global_flash_attn_out_buffer
56
+ return global_flash_attn_out_buffer[idx]
57
+
58
+ def free_flash_attn_out_buffer(idx):
59
+ global global_flash_attn_out_buffer
60
+ global_flash_attn_out_buffer[idx] = None
61
+
62
+ def write_gradient_to_flash_attn_out(idx, grad):
63
+ global global_flash_attn_out_buffer
64
+ global_flash_attn_out_buffer[idx].grad = grad
65
+
66
+ def save_res_grad_hook(grad):
67
+ global local_res_grad_buffer
68
+ local_res_grad_buffer = grad
69
+
70
+ def load_and_add_res_grad_hook(grad):
71
+ grad += get_res_grad_from_local_buffer()
72
+
73
+ def get_res_grad_from_local_buffer():
74
+ global local_res_grad_buffer
75
+ assert local_res_grad_buffer is not None
76
+ return local_res_grad_buffer
77
+
78
+ class CheckpointFunctionEndWithFlashAttention(torch.autograd.Function):
79
+ """ Avoid doing twice flash attention forward during checkpointed backward.
80
+ args:
81
+ hidden_states, # i.e., flash attention output which is saved in global buffer.
82
+ attention_mask,
83
+ position_ids,
84
+ residual, # the gradient of residual is saved in local buffer to pass across ckpt layers.
85
+ """
86
+
87
+ @staticmethod
88
+ def forward(ctx, run_function, layer_idx, preserve_rng_state, *args):
89
+ check_backward_validity(args)
90
+ ctx.run_function = run_function
91
+ ctx.layer_idx = layer_idx
92
+ ctx.preserve_rng_state = preserve_rng_state
93
+ # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
94
+ ctx.gpu_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs()
95
+ if preserve_rng_state:
96
+ ctx.fwd_cpu_state = torch.get_rng_state()
97
+ # Don't eagerly initialize the cuda context by accident.
98
+ # (If the user intends that the context is initialized later, within their
99
+ # run_function, we SHOULD actually stash the cuda state here. Unfortunately,
100
+ # we have no way to anticipate this will happen before we run the function.)
101
+ ctx.had_cuda_in_fwd = False
102
+ if torch.cuda._initialized:
103
+ ctx.had_cuda_in_fwd = True
104
+ ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args)
105
+
106
+ # Save non-tensor inputs in ctx, keep a placeholder None for tensors
107
+ # to be filled out during the backward.
108
+ ctx.inputs = []
109
+ ctx.tensor_indices = []
110
+ tensor_inputs = []
111
+ for i, arg in enumerate(args):
112
+ if i == 0 and ctx.layer_idx != 0:
113
+ # flash attention output is saved to the global buffer during forward
114
+ ctx.inputs.append(None)
115
+ else:
116
+ if torch.is_tensor(arg):
117
+ tensor_inputs.append(arg)
118
+ ctx.tensor_indices.append(i)
119
+ ctx.inputs.append(None)
120
+ else:
121
+ ctx.inputs.append(arg)
122
+
123
+ with torch.no_grad():
124
+ q, k, v, residual = run_function(*args)
125
+ softmax_scale = q.shape[-1] ** (-0.5)
126
+
127
+ # lightseq version
128
+ _, _, _, out, softmax_lse = _lightseq_forward(q, k, v, True, softmax_scale, comm_mode='lightseq')
129
+ rng_state = None
130
+
131
+ # save flash attention output to global buffer
132
+ save_flash_attn_out_to_global_buffer(ctx.layer_idx, out)
133
+ tensor_inputs += [softmax_lse]
134
+ ctx.softmax_scale = softmax_scale
135
+
136
+ ctx.save_for_backward(*tensor_inputs)
137
+
138
+ return out, residual
139
+
140
+ @staticmethod
141
+ def backward(ctx, *args):
142
+ if not torch.autograd._is_checkpoint_valid():
143
+ raise RuntimeError(
144
+ "Checkpointing is not compatible with .grad() or when an `inputs` parameter"
145
+ " is passed to .backward(). Please use .backward() and do not pass its `inputs`"
146
+ " argument.")
147
+ # Copy the list to avoid modifying original list.
148
+ inputs = list(ctx.inputs)
149
+ tensor_indices = ctx.tensor_indices
150
+ tensors = ctx.saved_tensors
151
+ tensors, softmax_lse = tensors[:-1], tensors[-1]
152
+
153
+ # Fill in inputs with appropriate saved tensors.
154
+ # Fill the flash attention output first
155
+ if ctx.layer_idx > 0:
156
+ # inputs[0] should be flash attention output
157
+ inputs[0] = get_flash_attn_out_from_global_buffer(ctx.layer_idx-1)
158
+ for i, idx in enumerate(tensor_indices):
159
+ inputs[idx] = tensors[i]
160
+
161
+ # Stash the surrounding rng state, and mimic the state that was
162
+ # present at this time during forward. Restore the surrounding state
163
+ # when we're done.
164
+ rng_devices = []
165
+ if ctx.preserve_rng_state and ctx.had_cuda_in_fwd:
166
+ rng_devices = ctx.fwd_gpu_devices
167
+ with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
168
+ if ctx.preserve_rng_state:
169
+ torch.set_rng_state(ctx.fwd_cpu_state)
170
+ if ctx.had_cuda_in_fwd:
171
+ set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
172
+ detached_inputs = detach_variable(tuple(inputs))
173
+ with torch.enable_grad(), \
174
+ torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs), \
175
+ torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs):
176
+ # Stop recomputation before flash attention
177
+ # It is unnecessary to run recomputation for flash attn
178
+ q, k, v, residual = ctx.run_function(*detached_inputs)
179
+
180
+ # run backward() with only tensor that requires grad
181
+ # run flash attention backward first:
182
+ # get 'dout' from auto_grad inputs
183
+ # get 'out' from global buffer
184
+ # get 'qkv' from the recomputed tensors
185
+ #dq = torch.empty(q.shape, dtype=q.dtype, device=q.device)
186
+ #dk = torch.empty(k.shape, dtype=q.dtype, device=q.device)
187
+ #dv = torch.empty(v.shape, dtype=q.dtype, device=q.device)
188
+ out = get_flash_attn_out_from_global_buffer(ctx.layer_idx)
189
+ # todo get dout
190
+ dout = args[0]
191
+
192
+ # lightseq version
193
+ dq, dk, dv = _lightseq_backward(dout, q, k, v, out, softmax_lse, ctx.softmax_scale, comm_mode='lightseq', backward_engine='flash')
194
+ #dqkv = torch.stack([dq, dk, dv])
195
+
196
+ # run backward for the part before flash attention
197
+ #qkv.backward(dqkv)
198
+ torch.autograd.backward([q, k, v], [dq, dk, dv])
199
+
200
+ grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None
201
+ for inp in detached_inputs)
202
+
203
+ # write flash attention output gradients to buffer
204
+ if ctx.layer_idx > 0:
205
+ write_gradient_to_flash_attn_out(ctx.layer_idx-1, detached_inputs[0].grad)
206
+
207
+ return (None, None, None) + grads
208
+
209
+
210
+ def checkpoint_end_with_flash_attention(function, layer_idx, *args, use_reentrant: bool = True, **kwargs):
211
+ # Hack to mix *args with **kwargs in a python 2.7-compliant way
212
+ preserve = kwargs.pop('preserve_rng_state', True)
213
+ if kwargs and use_reentrant:
214
+ raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs))
215
+
216
+ return CheckpointFunctionEndWithFlashAttention.apply(function, layer_idx, preserve, *args)
217
+
218
+
219
+ class CheckpointFunctionLastModule(torch.autograd.Function):
220
+ """
221
+ for the last ffn layer after flash attention, modifications include:
222
+ write the gradients wrt flash attention output and residual to the global buffer.
223
+ """
224
+
225
+ @staticmethod
226
+ def forward(ctx, run_function, preserve_rng_state, *args):
227
+ check_backward_validity(args)
228
+ ctx.run_function = run_function
229
+ ctx.preserve_rng_state = preserve_rng_state
230
+ # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
231
+ ctx.gpu_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs()
232
+ if preserve_rng_state:
233
+ ctx.fwd_cpu_state = torch.get_rng_state()
234
+ # Don't eagerly initialize the cuda context by accident.
235
+ # (If the user intends that the context is initialized later, within their
236
+ # run_function, we SHOULD actually stash the cuda state here. Unfortunately,
237
+ # we have no way to anticipate this will happen before we run the function.)
238
+ ctx.had_cuda_in_fwd = False
239
+ if torch.cuda._initialized:
240
+ ctx.had_cuda_in_fwd = True
241
+ ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args)
242
+
243
+ # Save non-tensor inputs in ctx, keep a placeholder None for tensors
244
+ # to be filled out during the backward.
245
+ ctx.inputs = []
246
+ ctx.tensor_indices = []
247
+ tensor_inputs = []
248
+
249
+ assert torch.is_tensor(args[0]), "assuming the first tensor is the flash attention output"
250
+ for i, arg in enumerate(args):
251
+ if torch.is_tensor(arg) and i == 0:
252
+ # flash attn output has been saved to global buffer
253
+ ctx.inputs.append(None)
254
+ elif torch.is_tensor(arg):
255
+ tensor_inputs.append(arg)
256
+ ctx.tensor_indices.append(i)
257
+ ctx.inputs.append(None)
258
+ else:
259
+ ctx.inputs.append(arg)
260
+
261
+ ctx.save_for_backward(*tensor_inputs)
262
+
263
+ with torch.no_grad():
264
+ outputs = run_function(*args)
265
+ return outputs
266
+
267
+ @staticmethod
268
+ def backward(ctx, *args):
269
+ if not torch.autograd._is_checkpoint_valid():
270
+ raise RuntimeError(
271
+ "Checkpointing is not compatible with .grad() or when an `inputs` parameter"
272
+ " is passed to .backward(). Please use .backward() and do not pass its `inputs`"
273
+ " argument.")
274
+ # Copy the list to avoid modifying original list.
275
+ inputs = list(ctx.inputs)
276
+ tensor_indices = ctx.tensor_indices
277
+ tensors = ctx.saved_tensors
278
+
279
+ # Fill in inputs with appropriate saved tensors.
280
+ # Fill the flash attention output first
281
+ # inputs[0] should be flash attention output
282
+ inputs[0] = get_flash_attn_out_from_global_buffer(-1)
283
+ for i, idx in enumerate(tensor_indices):
284
+ inputs[idx] = tensors[i]
285
+
286
+ # Stash the surrounding rng state, and mimic the state that was
287
+ # present at this time during forward. Restore the surrounding state
288
+ # when we're done.
289
+ rng_devices = []
290
+ if ctx.preserve_rng_state and ctx.had_cuda_in_fwd:
291
+ rng_devices = ctx.fwd_gpu_devices
292
+ with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
293
+ if ctx.preserve_rng_state:
294
+ torch.set_rng_state(ctx.fwd_cpu_state)
295
+ if ctx.had_cuda_in_fwd:
296
+ set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
297
+ detached_inputs = detach_variable(tuple(inputs))
298
+ with torch.enable_grad(), \
299
+ torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs), \
300
+ torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs):
301
+ outputs = ctx.run_function(*detached_inputs)
302
+
303
+ if isinstance(outputs, torch.Tensor):
304
+ outputs = (outputs,)
305
+
306
+ # run backward() with only tensor that requires grad
307
+ outputs_with_grad = []
308
+ args_with_grad = []
309
+ for i in range(len(outputs)):
310
+ if torch.is_tensor(outputs[i]) and outputs[i].requires_grad:
311
+ outputs_with_grad.append(outputs[i])
312
+ args_with_grad.append(args[i])
313
+ if len(outputs_with_grad) == 0:
314
+ raise RuntimeError(
315
+ "none of output has requires_grad=True,"
316
+ " this checkpoint() is not necessary")
317
+ torch.autograd.backward(outputs_with_grad, args_with_grad)
318
+ grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None
319
+ for inp in detached_inputs)
320
+
321
+ # write flash attention output gradients to buffer
322
+ write_gradient_to_flash_attn_out(-1, detached_inputs[0].grad)
323
+
324
+ return (None, None) + grads
325
+
326
+ def checkpoint_last_module(function, *args, use_reentrant: bool = True, **kwargs):
327
+ preserve = kwargs.pop('preserve_rng_state', True)
328
+ if kwargs and use_reentrant:
329
+ raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs))
330
+
331
+ return CheckpointFunctionLastModule.apply(function, preserve, *args)
332
+
333
+
334
+ def llama_layer_forward(
335
+ self,
336
+ hidden_states: torch.Tensor,
337
+ attention_mask: Optional[torch.Tensor] = None,
338
+ position_ids: Optional[torch.LongTensor] = None,
339
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
340
+ output_attentions: Optional[bool] = False,
341
+ use_cache: Optional[bool] = False,
342
+ compute_attn_only: Optional[bool] = False,
343
+ compute_ffn_only: Optional[bool] = False,
344
+ residual: Optional[bool] = None,
345
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
346
+ """
347
+ Args:
348
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
349
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
350
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
351
+ output_attentions (`bool`, *optional*):
352
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
353
+ returned tensors for more detail.
354
+ use_cache (`bool`, *optional*):
355
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
356
+ (see `past_key_values`).
357
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
358
+ """
359
+ assert compute_ffn_only or compute_attn_only
360
+
361
+ if compute_attn_only:
362
+ residual = hidden_states
363
+
364
+ if residual.requires_grad:
365
+ # register a hook to add the gradient of residual
366
+ # from next checkpoint layer when doing recomputation
367
+ hook = residual.register_hook(load_and_add_res_grad_hook)
368
+ global_hooks.append(hook)
369
+
370
+ hidden_states = self.input_layernorm(hidden_states)
371
+
372
+ # Flash Attention
373
+ bsz, q_len, _ = hidden_states.size()
374
+ try:
375
+ query_states = self.self_attn.q_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2)
376
+ key_states = self.self_attn.k_proj(hidden_states).view(bsz, q_len, self.self_attn.num_key_value_heads, self.self_attn.head_dim).transpose(1, 2)
377
+ value_states = self.self_attn.v_proj(hidden_states).view(bsz, q_len, self.self_attn.num_key_value_heads, self.self_attn.head_dim).transpose(1, 2)
378
+ except AttributeError:
379
+ # old transformers versions don't support num_key_value_heads
380
+ query_states = self.self_attn.q_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2)
381
+ key_states = self.self_attn.k_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2)
382
+ value_states = self.self_attn.v_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2)
383
+
384
+ kv_seq_len = key_states.shape[-2]
385
+ assert past_key_value is None, "past_key_value is not supported"
386
+
387
+ cos, sin = self.self_attn.rotary_emb(value_states, position_ids)
388
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
389
+ # [bsz, nh, t, hd]
390
+ assert not output_attentions, "output_attentions is not supported"
391
+ assert not use_cache, "use_cache is not supported"
392
+ return query_states.contiguous(), key_states.contiguous(), value_states.contiguous(), residual
393
+
394
+ elif compute_ffn_only:
395
+ hidden_states = self.self_attn.o_proj(rearrange(hidden_states, 'b h s d -> b s (h d)'))
396
+ # Need to add residual here to make sure checkpoint is right after attention
397
+ if residual.requires_grad:
398
+ # save the gradient of residual to the local buffer
399
+ # collect the hooks which should be removed after backward to avoid memory leak
400
+ hook = residual.register_hook(save_res_grad_hook)
401
+ global_hooks.append(hook)
402
+
403
+ hidden_states = residual + hidden_states
404
+
405
+ # Fully Connected
406
+
407
+ residual = hidden_states
408
+ hidden_states = self.post_attention_layernorm(hidden_states)
409
+ hidden_states = self.mlp(hidden_states)
410
+ hidden_states = residual + hidden_states
411
+
412
+ outputs = (hidden_states,)
413
+
414
+ else:
415
+ raise AttributeError
416
+
417
+ return outputs
418
+
419
+
420
+ def forward(
421
+ self,
422
+ input_ids: torch.LongTensor = None,
423
+ attention_mask: Optional[torch.Tensor] = None,
424
+ position_ids: Optional[torch.LongTensor] = None,
425
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
426
+ inputs_embeds: Optional[torch.FloatTensor] = None,
427
+ use_cache: Optional[bool] = None,
428
+ output_attentions: Optional[bool] = None,
429
+ output_hidden_states: Optional[bool] = None,
430
+ cache_position: Optional[torch.LongTensor] = None,
431
+ return_dict: Optional[bool] = None,
432
+ ):
433
+ assert cache_position is None, "cache_position is not supported"
434
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
435
+ output_hidden_states = (
436
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
437
+ )
438
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
439
+
440
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
441
+
442
+ # retrieve input_ids and inputs_embeds
443
+ if input_ids is not None and inputs_embeds is not None:
444
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
445
+ elif input_ids is not None:
446
+ batch_size, seq_length = input_ids.shape
447
+ elif inputs_embeds is not None:
448
+ batch_size, seq_length, _ = inputs_embeds.shape
449
+ else:
450
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
451
+
452
+ seq_length_with_past = seq_length
453
+ past_key_values_length = 0
454
+
455
+ if past_key_values is not None:
456
+ past_key_values_length = past_key_values[0][0].shape[2]
457
+ seq_length_with_past = seq_length_with_past + past_key_values_length
458
+
459
+ if position_ids is None:
460
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
461
+ position_ids = torch.arange(
462
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
463
+ )
464
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
465
+ else:
466
+ position_ids = position_ids.view(-1, seq_length).long()
467
+
468
+ if inputs_embeds is None:
469
+ inputs_embeds = self.embed_tokens(input_ids)
470
+ # embed positions
471
+ attention_mask = None
472
+
473
+ hidden_states = inputs_embeds
474
+
475
+ if self.gradient_checkpointing and self.training:
476
+ try:
477
+ logger.warning_once(
478
+ "***** Using fast gradient checkpointing... *****"
479
+ )
480
+ except:
481
+ pass
482
+ # initialize the global buffer
483
+ init_flash_attn_buffers(len(self.layers))
484
+
485
+ if use_cache:
486
+ try:
487
+ logger.warning_once(
488
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
489
+ )
490
+ except:
491
+ pass
492
+ use_cache = False
493
+
494
+ # decoder layers
495
+ all_hidden_states = () if output_hidden_states else None
496
+ all_self_attns = () if output_attentions else None
497
+ next_decoder_cache = () if use_cache else None
498
+
499
+ # apply flash-attention friendly gradient checkpointing
500
+ if self.gradient_checkpointing and self.training:
501
+ for idx in range(len(self.layers) + 1):
502
+ if output_hidden_states:
503
+ all_hidden_states += (hidden_states,)
504
+
505
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
506
+
507
+ def forward_first_attn_module(module):
508
+ def custom_forward(*inputs):
509
+ hidden_states, attention_mask, position_ids, _ = inputs
510
+ # None for past_key_value
511
+ return module(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_attn_only=True)
512
+ return custom_forward
513
+
514
+ def forward_ffn_attn_layer(module1, module2):
515
+ def custom_forward(*inputs):
516
+ hidden_states, attention_mask, position_ids, residual = inputs
517
+ # None for past_key_value
518
+ layer_outputs = module1(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_ffn_only=True, residual=residual)
519
+ hidden_states = layer_outputs[0]
520
+ return module2(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_attn_only=True)
521
+ return custom_forward
522
+
523
+ def forward_last_ffn_module(module):
524
+ def custom_forward(*inputs):
525
+ hidden_states, attention_mask, position_ids, residual = inputs
526
+ # None for past_key_value
527
+ return module(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_ffn_only=True, residual=residual)
528
+ return custom_forward
529
+
530
+ if idx == 0:
531
+ layer_outputs = checkpoint_end_with_flash_attention(
532
+ forward_first_attn_module(self.layers[0]),
533
+ idx,
534
+ hidden_states,
535
+ attention_mask,
536
+ position_ids,
537
+ None,
538
+ )
539
+ hidden_states, residual = layer_outputs[0], layer_outputs[-1]
540
+ elif idx == len(self.layers):
541
+ layer_outputs = checkpoint_last_module(
542
+ forward_last_ffn_module(self.layers[-1]),
543
+ hidden_states,
544
+ attention_mask,
545
+ position_ids,
546
+ residual,
547
+ )
548
+ hidden_states = layer_outputs[0]
549
+ else:
550
+ layer_outputs = checkpoint_end_with_flash_attention(
551
+ forward_ffn_attn_layer(self.layers[idx-1], self.layers[idx]),
552
+ idx,
553
+ hidden_states,
554
+ attention_mask,
555
+ position_ids,
556
+ residual,
557
+ )
558
+ hidden_states, residual = layer_outputs[0], layer_outputs[-1]
559
+
560
+ if use_cache:
561
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
562
+
563
+ if output_attentions:
564
+ all_self_attns += (layer_outputs[1],)
565
+ else:
566
+ for idx, decoder_layer in enumerate(self.layers):
567
+ if output_hidden_states:
568
+ all_hidden_states += (hidden_states,)
569
+
570
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
571
+
572
+ layer_outputs = decoder_layer(
573
+ hidden_states,
574
+ attention_mask=attention_mask,
575
+ position_ids=position_ids,
576
+ past_key_value=past_key_value,
577
+ output_attentions=output_attentions,
578
+ use_cache=use_cache,
579
+ )
580
+
581
+ hidden_states = layer_outputs[0]
582
+
583
+ if use_cache:
584
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
585
+
586
+ if output_attentions:
587
+ all_self_attns += (layer_outputs[1],)
588
+
589
+ hidden_states = self.norm(hidden_states)
590
+
591
+ # add hidden states from the last decoder layer
592
+ if output_hidden_states:
593
+ all_hidden_states += (hidden_states,)
594
+
595
+ next_cache = next_decoder_cache if use_cache else None
596
+ if not return_dict:
597
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
598
+ return BaseModelOutputWithPast(
599
+ last_hidden_state=hidden_states,
600
+ past_key_values=next_cache,
601
+ hidden_states=all_hidden_states,
602
+ attentions=all_self_attns,
603
+ )
604
+
605
+
606
+ def apply_dist_flash_attn_monkey_patch_llama():
607
+ initialize_distributed()
608
+ transformers.models.llama.modeling_llama.LlamaModel.forward = forward
609
+ transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward = llama_layer_forward
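A minimal usage sketch (not part of the diff): the patch is meant to be applied before the Llama model is built, so that the LlamaModel.forward and LlamaDecoderLayer.forward defined above are picked up. The import path follows this repo's file layout, the checkpoint name is a hypothetical placeholder, and a torch.distributed launcher (e.g. torchrun) is assumed because the patch calls initialize_distributed().

import torch
from transformers import AutoModelForCausalLM
from easy_context.dist_flash_attn.monkey_patch import apply_dist_flash_attn_monkey_patch_llama

apply_dist_flash_attn_monkey_patch_llama()   # swaps the forwards and sets up the comm groups

model = AutoModelForCausalLM.from_pretrained(
    "hypothetical/llama-checkpoint", torch_dtype=torch.bfloat16
)
# The fast path above is only taken when gradient checkpointing is on and the model is training
# (see the `self.gradient_checkpointing and self.training` branch in forward()).
model.gradient_checkpointing_enable()
model.train()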
vision_niah_d/easy_context/dist_flash_attn/prepare_input.py ADDED
@@ -0,0 +1,36 @@
1
+
2
+
3
+ def extract_local(value, rank, world_size, device, dim=1):
4
+ value_local = value.chunk(world_size, dim=dim)[rank]
5
+ return value_local.to(device)
6
+
7
+
8
+ def prepare_dist_flash_attn_inputs(
9
+ input_ids, position_ids, target_ids, rank, world_size, device
10
+ ):
11
+ local_input_ids = extract_local(
12
+ input_ids,
13
+ rank,
14
+ world_size,
15
+ device,
16
+ )
17
+ local_position_ids = extract_local(
18
+ position_ids,
19
+ rank,
20
+ world_size,
21
+ device,
22
+ )
23
+ if target_ids is not None:
24
+ local_target_ids = extract_local(
25
+ target_ids,
26
+ rank,
27
+ world_size,
28
+ device,
29
+ )
30
+ else:
31
+ local_target_ids = None
32
+ return {
33
+ "local_input_ids": local_input_ids,
34
+ "local_position_ids": local_position_ids,
35
+ "local_target_ids": local_target_ids,
36
+ }
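A short illustration (not from the diff) of what these helpers do: each rank keeps the rank-th contiguous chunk of the sequence dimension. It assumes torch.distributed has already been initialized by the training launcher.

import torch
import torch.distributed as dist

input_ids = torch.arange(16).unsqueeze(0)        # toy [1, 16] sequence
position_ids = torch.arange(16).unsqueeze(0)
rank, world_size = dist.get_rank(), dist.get_world_size()
shards = prepare_dist_flash_attn_inputs(
    input_ids, position_ids, None, rank, world_size, torch.device("cuda", rank)
)
# With world_size = 4, rank r receives tokens [4*r, 4*r + 4) in shards["local_input_ids"],
# because extract_local() simply takes the r-th chunk of value.chunk(world_size, dim=1).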
vision_niah_d/easy_context/low_mem_cross_ent.py ADDED
@@ -0,0 +1,94 @@
1
+ """Low memory cross entropy without materilizing the logits
2
+
3
+ This module enables long-context training of large-vocab models, e.g., Gemma (~256K vocab) and Llama 3 (~128K)
4
+
5
+ Yao Fu, University of Edinburgh
6
7
+ """
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+
12
+
13
+ def cross_ent_normal(x, weight, labels):
14
+ logits = torch.einsum("bsh, vh -> bsv", x, weight)
15
+ vocab = weight.size(0)
16
+ loss = F.cross_entropy(logits.view(-1, vocab), labels.view(-1))
17
+ return loss
18
+
19
+ class LowMemLogitProjCrossEnt(torch.autograd.Function):
20
+ """Low memory implementation of logits projection plus cross entropy loss.
21
+ Useful for reducing the peak memory when dealing with vocabularies larger than 100,000
22
+
23
+ TODO: integrate this function into easy context
24
+
25
+ Two tricks used here
26
+ 1. Shard the data to reduce peak memory
27
+ 2. Do not save the logits
28
+ """
29
+
30
+ @staticmethod
31
+ # @torch.compile() # Currently we do not use torch.compile because it uses additional memory
32
+ def forward(ctx, x: torch.Tensor, weight: torch.Tensor, labels: torch.Tensor, sp: int=4):
33
+ """
34
+ Args:
35
+ x: size = [batch, seqlen, hidden]
36
+ weight: size = [vocab, hidden]
37
+ labels: size = [batch, seqlen]
38
+ """
39
+ bsz, seqlen, hidden = x.size()
40
+ vocab = weight.size(0)
41
+ micro_seqlen = seqlen // sp
42
+
43
+ loss = 0
44
+ for i in range(sp): # shard data along the sequence dimension
45
+ logits_i_slice = torch.einsum("bsh, vh -> bsv", x[:, micro_seqlen * i: micro_seqlen * (i + 1)], weight)
46
+ loss_i = F.cross_entropy(logits_i_slice.view(-1, vocab), labels[:, micro_seqlen * i: micro_seqlen * (i + 1)].view(-1))
47
+ loss = loss + loss_i
48
+
49
+ loss = loss / sp
50
+ ctx.save_for_backward(x, weight, labels) # because we do not save the logits, we save memory
51
+ ctx.sp = sp
52
+ return loss
53
+
54
+ # @torch.compile()
55
+ @staticmethod
56
+ def backward(ctx, grad_output):
57
+ """Manually calculate the gradient in a memory-efficient way
58
+ Ref: https://indii.org/blog/gradients-of-softmax-and-logsumexp/
59
+ """
60
+ x, weight, labels = ctx.saved_tensors
61
+ sp = ctx.sp
62
+ device = x.device
63
+ dtype = x.dtype
64
+ bsz, seqlen, hidden = x.size()
65
+ vocab, hidden = weight.size()
66
+ micro_seqlen = seqlen // sp
67
+
68
+ d_weight = torch.zeros_like(weight, device=weight.device)
69
+ d_x = []
70
+ for i in range(sp): # shard data along sequence dimension, reduce peak memory
71
+ x_ = x[:, micro_seqlen * i: micro_seqlen * (i + 1)]
72
+ p = F.softmax(
73
+ torch.einsum("blh, vh -> blv", x_, weight),
74
+ dim=-1
75
+ )
76
+
77
+ # memory efficient in-place backprop
78
+ # loss -> d_logits
79
+ d_logits = -p.view(-1) # [b * l * v]
80
+ labels_ = labels[:, micro_seqlen * i: micro_seqlen * (i + 1)].view(-1) # [b * l]
81
+ index = torch.arange(bsz * micro_seqlen, device=device) * vocab + labels_
82
+ source = torch.tensor([1] * bsz * micro_seqlen, dtype=dtype, device=device)
83
+ d_logits.index_add_(0, index, source)
84
+ d_logits = -d_logits.view(bsz, micro_seqlen, vocab) / (bsz * seqlen)
85
+
86
+ # d_logits -> d_x and d_weight
87
+ d_x.append(torch.einsum("blv, vh -> blh", d_logits, weight))
88
+ d_weight += torch.einsum("blv, blh -> vh", d_logits, x_)
89
+
90
+ d_weight = grad_output * d_weight
91
+ d_x = grad_output * torch.concat(d_x, 1)
92
+ return d_x, d_weight, None, None
93
+
94
+ low_mem_cross_ent = LowMemLogitProjCrossEnt.apply
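A usage sketch (not part of the diff), mirroring the shapes used by the tests in low_mem_cross_ent_tests; the sizes are illustrative only. The custom backward reproduces d_logits = (softmax(logits) - one_hot(labels)) / (bsz * seqlen) shard by shard, so the full [batch, seqlen, vocab] logits tensor is never materialized.

import torch

hidden_states = torch.randn(1, 8192, 4096, device="cuda", dtype=torch.bfloat16, requires_grad=True)
lm_head_weight = torch.randn(150000, 4096, device="cuda", dtype=torch.bfloat16, requires_grad=True)
labels = torch.randint(0, 150000, (1, 8192), device="cuda")

loss = low_mem_cross_ent(hidden_states, lm_head_weight, labels, 4)  # sp=4 sequence shards
loss.backward()   # fills hidden_states.grad and lm_head_weight.grad via the manual backward above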
vision_niah_d/easy_context/low_mem_cross_ent_tests/test_correctness.py ADDED
@@ -0,0 +1,81 @@
1
+ """Test the correctness (up to certain tolerance of numerical error) of low-memory cross-ent
2
+
3
+ Yao Fu, University of Edinburgh
4
5
+ """
6
+
7
+ import sys
8
+ sys.path.append("..")
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+ from low_mem_cross_ent import low_mem_cross_ent, cross_ent_normal
13
+
14
+ bsz = 1
15
+ seqlen = 50000
16
+ hidden = 4096
17
+ vocab = 15000
18
+ dtype = torch.bfloat16
19
+ rtol=1e-05 # relative tolerance when comparing the gradients from two implementations
20
+ atol=1e-07 # absolute tolerance when comparing the gradients from two implementations
21
+ # in PyTorch the default is 1e-8 but our implementation cannot pass that threshold
22
+ # 1e-7 seems to be the smallest tolerance we can pass
23
+
24
+ x = torch.normal(mean=0, std=0.01, size=(bsz, seqlen, hidden),
25
+ device="cuda", dtype=dtype, requires_grad=True)
26
+ weight = torch.normal(mean=0, std=0.01, size=(vocab, hidden),
27
+ device="cuda", dtype=dtype, requires_grad=True)
28
+ labels = torch.randint(low=0, high=vocab - 1, size=(bsz, seqlen), device="cuda")
29
+
30
+ loss_normal = cross_ent_normal(x, weight, labels)
31
+ print("loss normal: %.4f" % loss_normal.cpu().item())
32
+ loss_normal.backward()
33
+ x_grad = x.grad.clone()
34
+ weight_grad = weight.grad.clone()
35
+ # print(x.grad)
36
+ # print(weight.grad)
37
+
38
+
39
+ # TODO: this already almost halves the memory. Maybe further increase sp
40
+ x.grad = None
41
+ weight.grad = None
42
+ loss_low_mem = low_mem_cross_ent(x, weight, labels)
43
+ print("loss low mem: %.4f" % loss_low_mem.cpu().item())
44
+ loss_low_mem.backward()
45
+ # print(x.grad)
46
+ # print(weight.grad)
47
+
48
+ ## Test implementation by asserting close
49
+ assert(torch.allclose(x_grad, x.grad, rtol=rtol, atol=atol))
50
+ assert(torch.allclose(weight_grad, weight.grad, rtol=rtol, atol=atol))
51
+ print("PASS: gradients from normal computation and low memory computation are close.")
52
+
53
+
54
+ # #### Test gradient of logits
55
+ # x.grad = None
56
+ # weight.grad = None
57
+ # logits = torch.einsum("bsh, vh -> bsv", x, weight)
58
+ # loss = F.cross_entropy(logits.view(-1, vocab), labels.view(-1))
59
+ # d_logits = torch.autograd.grad(loss, logits)
60
+ # p = F.softmax(torch.einsum("blh, vh -> blv", x, weight), dim=-1)
61
+ # p_ = p / (bsz * seqlen)
62
+
63
+ # #### test index add
64
+ # x = torch.tensor([1, 2, 3, 4, 5, 6, 7])
65
+ # index = torch.tensor([1, 3, 4])
66
+ # source = torch.tensor([1, 1, 1])
67
+ # x.index_add_(dim=0, index=index, source=source)
68
+
69
+ # #### test index add 2
70
+ # sp = 4
71
+ # micro_seqlen = seqlen // sp
72
+ # p = torch.normal(mean=0, std=0.01, size=(bsz, micro_seqlen, vocab),
73
+ # device="cuda", dtype=torch.bfloat16)
74
+ # labels_ = labels[:, :micro_seqlen].view(-1)
75
+ # index = torch.arange(bsz * micro_seqlen, device="cuda") * vocab
76
+ # index += labels_
77
+ # d_logits = -p.view(-1)
78
+ # source = torch.tensor([1] * bsz * micro_seqlen, dtype=torch.bfloat16, device="cuda")
79
+ # d_logits.index_add_(0, index, source)
80
+ # d_logits = d_logits.view(bsz, micro_seqlen, vocab)
81
+
vision_niah_d/easy_context/low_mem_cross_ent_tests/test_mem_and_speed.py ADDED
@@ -0,0 +1,80 @@
1
+ """Test the memory and speed and MFU of low memory cross entropy
2
+
3
+ Yao Fu, University of Edinburgh
4
5
+
6
+ bf16, seqlen=50000, vocab=150000, without torch.compile
7
+ | | normal | low_mem | - |
8
+ | sp | - | 4 | 16 |
9
+ | peak mem | 43.4G | 18.5G | 8.1G |
10
+ | forward | 0.307 | 0.310 | 0.315 |
11
+ | backward | 0.631 | 0.896 | 0.914 |
12
+ | MFU | 0.57 | 0.45 | 0.44 |
13
+
14
+ NOTE: tried torch.compile and it takes significantly more memory, so we do not use it
15
+ TODO: profile and check why backward is slower
16
+ """
17
+ import sys
18
+ sys.path.append("..")
19
+
20
+ import torch
21
+ import numpy as np
22
+ import torch.nn.functional as F
23
+ from low_mem_cross_ent import low_mem_cross_ent, cross_ent_normal
24
+
25
+ implementation = "low_mem" # "normal", "low_mem"
26
+ device_type = "A100"
27
+ bsz = 1
28
+ seqlen = 50000
29
+ hidden = 4096
30
+ vocab = 150000
31
+ sp=16
32
+ dtype = torch.bfloat16
33
+ # dtype = torch.float
34
+ G = 1024 ** 3
35
+ T = 1024 ** 4
36
+
37
+ x = torch.normal(mean=0, std=0.01, size=(bsz, seqlen, hidden),
38
+ device="cuda", dtype=dtype, requires_grad=True)
39
+ weight = torch.normal(mean=0, std=0.01, size=(vocab, hidden),
40
+ device="cuda", dtype=dtype, requires_grad=True)
41
+ labels = torch.randint(low=0, high=vocab - 1, size=(bsz, seqlen), device="cuda")
42
+
43
+ def timed(fn):
44
+ start = torch.cuda.Event(enable_timing=True)
45
+ end = torch.cuda.Event(enable_timing=True)
46
+ start.record()
47
+ result = fn()
48
+ end.record()
49
+ torch.cuda.synchronize()
50
+ return result, start.elapsed_time(end) / 1000
51
+
52
+ n_runs = 50
53
+ flop = 6 * bsz * seqlen * hidden * vocab
54
+ if(implementation == "normal"):
55
+ forward_times, backward_times = [], []
56
+ for _ in range(n_runs):
57
+ loss_normal, time_elapse = timed(lambda: cross_ent_normal(x, weight, labels))
58
+ forward_times.append(time_elapse)
59
+ _, time_elapse = timed(lambda: loss_normal.backward())
60
+ backward_times.append(time_elapse)
61
+ mem = torch.cuda.max_memory_allocated()
62
+ elif(implementation == "low_mem"):
63
+ forward_times, backward_times = [], []
64
+ for _ in range(n_runs):
65
+ loss_low_mem, time_elapse = timed(lambda: low_mem_cross_ent(x, weight, labels, sp))
66
+ forward_times.append(time_elapse)
67
+ _, time_elapse = timed(lambda: loss_low_mem.backward())
68
+ backward_times.append(time_elapse)
69
+ mem = torch.cuda.max_memory_allocated()
70
+ else: raise NameError("Implementation %s not recognized" % implementation)
71
+
72
+ forward_time = np.median(forward_times)
73
+ backward_time = np.median(backward_times)
74
+ flops = (flop / T) / (forward_time + backward_time)
75
+ if(device_type == "A100"):
76
+ device_flop = 312
77
+ else: raise NameError("device %s not recognized" % device_type)
78
+
79
+ print("%s, peak memory %.1fG, forward time %.4f, backward time %.4f, flops %.2fT, util %.2f" %
80
+ (implementation, mem / G, forward_time, backward_time, flops, flops / device_flop))
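A quick arithmetic check (not part of the diff) of the MFU numbers in the docstring table, using the script's own conventions: flop = 6*b*s*h*v counts the forward (2x) plus backward (4x) matmul FLOPs of the logit projection, T = 1024**4, and the A100 bf16 peak is taken as 312 TFLOPS.

b, s, h, v = 1, 50000, 4096, 150000
flop = 6 * b * s * h * v                  # ~1.84e14 FLOPs, i.e. ~167.6 "T" units
t_fwd, t_bwd = 0.307, 0.631               # "normal" row of the table, in seconds
mfu = (flop / 1024**4) / (t_fwd + t_bwd) / 312
print(round(mfu, 2))                      # 0.57, matching the table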
vision_niah_d/easy_context/modeling_qwen2.py ADDED
@@ -0,0 +1,1397 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch Qwen2 model."""
21
+
22
+ import inspect
23
+ import math
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+
32
+ from transformers.activations import ACT2FN
33
+ from transformers.cache_utils import Cache, DynamicCache
34
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
35
+ from transformers.modeling_outputs import (
36
+ BaseModelOutputWithPast,
37
+ CausalLMOutputWithPast,
38
+ SequenceClassifierOutputWithPast,
39
+ TokenClassifierOutput,
40
+ )
41
+ from transformers.modeling_utils import PreTrainedModel
42
+ from transformers.utils import (
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ is_flash_attn_2_available,
46
+ is_flash_attn_greater_or_equal_2_10,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
51
+ from .low_mem_cross_ent import low_mem_cross_ent
52
+
53
+
54
+ if is_flash_attn_2_available():
55
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
56
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
57
+
58
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
59
+ from ring_flash_attn.zigzag_ring_flash_attn import zigzag_ring_flash_attn_func
60
+
61
+
62
+ logger = logging.get_logger(__name__)
63
+
64
+
65
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
66
+ _CONFIG_FOR_DOC = "Qwen2Config"
67
+
68
+
69
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
70
+ def _get_unpad_data(attention_mask):
71
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
72
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
73
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
74
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
75
+ return (
76
+ indices,
77
+ cu_seqlens,
78
+ max_seqlen_in_batch,
79
+ )
80
+
81
+
82
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
83
+ class Qwen2RMSNorm(nn.Module):
84
+ def __init__(self, hidden_size, eps=1e-6):
85
+ """
86
+ Qwen2RMSNorm is equivalent to T5LayerNorm
87
+ """
88
+ super().__init__()
89
+ self.weight = nn.Parameter(torch.ones(hidden_size))
90
+ self.variance_epsilon = eps
91
+
92
+ def forward(self, hidden_states):
93
+ input_dtype = hidden_states.dtype
94
+ hidden_states = hidden_states.to(torch.float32)
95
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
96
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
97
+ return self.weight * hidden_states.to(input_dtype)
98
+
99
+
100
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2
101
+ class Qwen2RotaryEmbedding(nn.Module):
102
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
103
+ super().__init__()
104
+ self.dim = dim
105
+ self.max_position_embeddings = max_position_embeddings
106
+ self.base = base
107
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
108
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
109
+ # For BC we register cos and sin cached
110
+ self.max_seq_len_cached = max_position_embeddings
111
+
112
+ @torch.no_grad()
113
+ def forward(self, x, position_ids):
114
+ # x: [bs, num_attention_heads, seq_len, head_size]
115
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
116
+ position_ids_expanded = position_ids[:, None, :].float()
117
+ # Force float32 since bfloat16 loses precision on long contexts
118
+ # See https://github.com/huggingface/transformers/pull/29285
119
+ device_type = x.device.type
120
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
121
+ with torch.autocast(device_type=device_type, enabled=False):
122
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
123
+ emb = torch.cat((freqs, freqs), dim=-1)
124
+ cos = emb.cos()
125
+ sin = emb.sin()
126
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
127
+
128
+
129
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
130
+ def rotate_half(x):
131
+ """Rotates half the hidden dims of the input."""
132
+ x1 = x[..., : x.shape[-1] // 2]
133
+ x2 = x[..., x.shape[-1] // 2 :]
134
+ return torch.cat((-x2, x1), dim=-1)
135
+
136
+
137
+ # Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb
138
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
139
+ """Applies Rotary Position Embedding to the query and key tensors.
140
+
141
+ Args:
142
+ q (`torch.Tensor`): The query tensor.
143
+ k (`torch.Tensor`): The key tensor.
144
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
145
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
146
+ position_ids (`torch.Tensor`, *optional*):
147
+ Deprecated and unused.
148
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
149
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
150
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
151
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
152
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
153
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
154
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
155
+ Returns:
156
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
157
+ """
158
+ cos = cos.unsqueeze(unsqueeze_dim)
159
+ sin = sin.unsqueeze(unsqueeze_dim)
160
+ q_embed = (q * cos) + (rotate_half(q) * sin)
161
+ k_embed = (k * cos) + (rotate_half(k) * sin)
162
+ return q_embed, k_embed
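A small shape sketch (illustrative only) for the default unsqueeze_dim=1, i.e. q and k laid out as [batch, heads, seq_len, head_dim] as in the attention modules below; torch is already imported at the top of this file.

q = k = torch.randn(1, 32, 128, 64)
cos = sin = torch.randn(1, 128, 64)     # shape returned by Qwen2RotaryEmbedding.forward
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
# cos/sin are unsqueezed to [1, 1, 128, 64] and broadcast over the 32 heads.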
163
+
164
+
165
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
166
+ class Qwen2MLP(nn.Module):
167
+ def __init__(self, config):
168
+ super().__init__()
169
+ self.hidden_size = config.hidden_size
170
+ self.intermediate_size = config.intermediate_size
171
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
172
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
173
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
174
+ self.act_fn = ACT2FN[config.hidden_act]
175
+
176
+ def forward(self, hidden_state):
177
+ return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
178
+
179
+
180
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
181
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
182
+ """
183
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
184
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
185
+ """
186
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
187
+ if n_rep == 1:
188
+ return hidden_states
189
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
190
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
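A quick check (illustrative only) of the equivalence stated in the docstring: repeat_kv matches torch.repeat_interleave along the key/value head dimension.

x = torch.randn(2, 4, 7, 16)   # (batch, num_key_value_heads, seqlen, head_dim)
assert torch.equal(repeat_kv(x, 3), torch.repeat_interleave(x, repeats=3, dim=1))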
191
+
192
+
193
+ class Qwen2Attention(nn.Module):
194
+ """
195
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
196
+ and "Generating Long Sequences with Sparse Transformers".
197
+ """
198
+
199
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
200
+ super().__init__()
201
+ self.config = config
202
+ self.layer_idx = layer_idx
203
+ if layer_idx is None:
204
+ logger.warning_once(
205
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
206
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
207
+ "when creating this class."
208
+ )
209
+
210
+ self.hidden_size = config.hidden_size
211
+ self.num_heads = config.num_attention_heads
212
+ self.head_dim = self.hidden_size // self.num_heads
213
+ self.num_key_value_heads = config.num_key_value_heads
214
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
215
+ self.max_position_embeddings = config.max_position_embeddings
216
+ self.rope_theta = config.rope_theta
217
+ self.is_causal = True
218
+ self.attention_dropout = config.attention_dropout
219
+
220
+ if (self.head_dim * self.num_heads) != self.hidden_size:
221
+ raise ValueError(
222
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
223
+ f" and `num_heads`: {self.num_heads})."
224
+ )
225
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
226
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
227
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
228
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
229
+
230
+ self.rotary_emb = Qwen2RotaryEmbedding(
231
+ self.head_dim,
232
+ max_position_embeddings=self.max_position_embeddings,
233
+ base=self.rope_theta,
234
+ )
235
+
236
+ def forward(
237
+ self,
238
+ hidden_states: torch.Tensor,
239
+ attention_mask: Optional[torch.Tensor] = None,
240
+ position_ids: Optional[torch.LongTensor] = None,
241
+ past_key_value: Optional[Cache] = None,
242
+ output_attentions: bool = False,
243
+ use_cache: bool = False,
244
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
245
+ bsz, q_len, _ = hidden_states.size()
246
+
247
+ query_states = self.q_proj(hidden_states)
248
+ key_states = self.k_proj(hidden_states)
249
+ value_states = self.v_proj(hidden_states)
250
+
251
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
252
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
253
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
254
+
255
+ kv_seq_len = key_states.shape[-2]
256
+ if past_key_value is not None:
257
+ if self.layer_idx is None:
258
+ raise ValueError(
259
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
260
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
261
+ "with a layer index."
262
+ )
263
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
264
+ cos, sin = self.rotary_emb(value_states, position_ids)
265
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
266
+
267
+ if past_key_value is not None:
268
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
269
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
270
+
271
+ # repeat k/v heads if n_kv_heads < n_heads
272
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
273
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
274
+
275
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
276
+
277
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
278
+ raise ValueError(
279
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
280
+ f" {attn_weights.size()}"
281
+ )
282
+
283
+ if attention_mask is not None:
284
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
285
+ raise ValueError(
286
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
287
+ )
288
+
289
+ attn_weights = attn_weights + attention_mask
290
+
291
+ # upcast attention to fp32
292
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
293
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
294
+ attn_output = torch.matmul(attn_weights, value_states)
295
+
296
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
297
+ raise ValueError(
298
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
299
+ f" {attn_output.size()}"
300
+ )
301
+
302
+ attn_output = attn_output.transpose(1, 2).contiguous()
303
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
304
+
305
+ attn_output = self.o_proj(attn_output)
306
+
307
+ if not output_attentions:
308
+ attn_weights = None
309
+
310
+ return attn_output, attn_weights, past_key_value
311
+
312
+
313
+ class Qwen2FlashAttention2(Qwen2Attention):
314
+ """
315
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
316
+ as the weights of the module stays untouched. The only required change would be on the forward pass
317
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
318
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
319
+ config.max_window_layers layers.
320
+ """
321
+
322
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
323
+ def __init__(self, *args, **kwargs):
324
+ super().__init__(*args, **kwargs)
325
+
326
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
327
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
328
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
329
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
330
+
331
+ def forward(
332
+ self,
333
+ hidden_states: torch.Tensor,
334
+ attention_mask: Optional[torch.Tensor] = None,
335
+ position_ids: Optional[torch.LongTensor] = None,
336
+ past_key_value: Optional[Cache] = None,
337
+ output_attentions: bool = False,
338
+ use_cache: bool = False,
339
+ ):
340
+ bsz, q_len, _ = hidden_states.size()
341
+
342
+ query_states = self.q_proj(hidden_states)
343
+ key_states = self.k_proj(hidden_states)
344
+ value_states = self.v_proj(hidden_states)
345
+
346
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
347
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
348
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
349
+
350
+ kv_seq_len = key_states.shape[-2]
351
+ if past_key_value is not None:
352
+ if self.layer_idx is None:
353
+ raise ValueError(
354
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
355
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
356
+ "with a layer index."
357
+ )
358
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
359
+
360
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
361
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
362
+ cos, sin = self.rotary_emb(value_states, position_ids)
363
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
364
+
365
+ use_sliding_windows = (
366
+ _flash_supports_window_size
367
+ and getattr(self.config, "sliding_window", None) is not None
368
+ and kv_seq_len > self.config.sliding_window
369
+ and self.config.use_sliding_window
370
+ )
371
+
372
+ if not _flash_supports_window_size:
373
+ logger.warning_once(
374
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
375
+ " make sure to upgrade flash-attn library."
376
+ )
377
+
378
+ if past_key_value is not None:
379
+ # Activate slicing cache only if the config has a `sliding_window` attribute
380
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
381
+ if (
382
+ getattr(self.config, "sliding_window", None) is not None
383
+ and kv_seq_len > self.config.sliding_window
384
+ and cache_has_contents
385
+ ):
386
+ slicing_tokens = 1 - self.config.sliding_window
387
+
388
+ past_key = past_key_value[self.layer_idx][0]
389
+ past_value = past_key_value[self.layer_idx][1]
390
+
391
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
392
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
393
+
394
+ if past_key.shape[-2] != self.config.sliding_window - 1:
395
+ raise ValueError(
396
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
397
+ f" {past_key.shape}"
398
+ )
399
+
400
+ if attention_mask is not None:
401
+ attention_mask = attention_mask[:, slicing_tokens:]
402
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
403
+
404
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
405
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
406
+
407
+ # repeat k/v heads if n_kv_heads < n_heads
408
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
409
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
410
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
411
+
412
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
413
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
414
+ # cast them back to the expected dtype just to be sure everything works as expected.
415
+ input_dtype = query_states.dtype
416
+ if input_dtype == torch.float32:
417
+ if torch.is_autocast_enabled():
418
+ target_dtype = torch.get_autocast_gpu_dtype()
419
+ # Handle the case where the model is quantized
420
+ elif hasattr(self.config, "_pre_quantization_dtype"):
421
+ target_dtype = self.config._pre_quantization_dtype
422
+ else:
423
+ target_dtype = self.q_proj.weight.dtype
424
+
425
+ logger.warning_once(
426
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
427
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
428
+ f" {target_dtype}."
429
+ )
430
+
431
+ query_states = query_states.to(target_dtype)
432
+ key_states = key_states.to(target_dtype)
433
+ value_states = value_states.to(target_dtype)
434
+
435
+ # Reshape to the expected shape for Flash Attention
436
+ query_states = query_states.transpose(1, 2)
437
+ key_states = key_states.transpose(1, 2)
438
+ value_states = value_states.transpose(1, 2)
439
+
440
+ attn_output = self._flash_attention_forward(
441
+ query_states,
442
+ key_states,
443
+ value_states,
444
+ attention_mask,
445
+ q_len,
446
+ dropout=dropout_rate,
447
+ use_sliding_windows=use_sliding_windows,
448
+ )
449
+
450
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
451
+ attn_output = self.o_proj(attn_output)
452
+
453
+ if not output_attentions:
454
+ attn_weights = None
455
+
456
+ return attn_output, attn_weights, past_key_value
457
+
458
+ def _flash_attention_forward(
459
+ self,
460
+ query_states,
461
+ key_states,
462
+ value_states,
463
+ attention_mask,
464
+ query_length,
465
+ dropout=0.0,
466
+ softmax_scale=None,
467
+ use_sliding_windows=False,
468
+ ):
469
+ if not self._flash_attn_uses_top_left_mask:
470
+ causal = self.is_causal
471
+ else:
472
+ causal = self.is_causal and query_length != 1
473
+
474
+ # This patched path does not handle padding: the ring-attention kernel below requires an unpadded, fully causal sequence (enforced by the asserts)
475
+ assert attention_mask is None
476
+ assert causal is True
477
+ assert use_sliding_windows is False
478
+ attn_output = zigzag_ring_flash_attn_func(
479
+ query_states,
480
+ key_states,
481
+ value_states,
482
+ dropout,
483
+ softmax_scale,
484
+ causal=causal,
485
+ )
486
+
487
+ return attn_output
488
+
489
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
490
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
491
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
492
+
493
+ # On the first iteration we need to properly re-create the padding mask
494
+ # by slicing it on the proper place
495
+ if kv_seq_len != attention_mask.shape[-1]:
496
+ attention_mask_num_tokens = attention_mask.shape[-1]
497
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
498
+
499
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
500
+
501
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
502
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
503
+
504
+ if query_length == kv_seq_len:
505
+ query_layer = index_first_axis(
506
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
507
+ )
508
+ cu_seqlens_q = cu_seqlens_k
509
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
510
+ indices_q = indices_k
511
+ elif query_length == 1:
512
+ max_seqlen_in_batch_q = 1
513
+ cu_seqlens_q = torch.arange(
514
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
515
+ ) # There is a memcpy here, that is very bad.
516
+ indices_q = cu_seqlens_q[:-1]
517
+ query_layer = query_layer.squeeze(1)
518
+ else:
519
+ # The -q_len: slice assumes left padding.
520
+ attention_mask = attention_mask[:, -query_length:]
521
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
522
+
523
+ return (
524
+ query_layer,
525
+ key_layer,
526
+ value_layer,
527
+ indices_q,
528
+ (cu_seqlens_q, cu_seqlens_k),
529
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
530
+ )
531
+
532
+
533
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2
534
+ class Qwen2SdpaAttention(Qwen2Attention):
535
+ """
536
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
537
+ `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
538
+ SDPA API.
539
+ """
540
+
541
+ # Adapted from Qwen2Attention.forward
542
+ def forward(
543
+ self,
544
+ hidden_states: torch.Tensor,
545
+ attention_mask: Optional[torch.Tensor] = None,
546
+ position_ids: Optional[torch.LongTensor] = None,
547
+ past_key_value: Optional[Cache] = None,
548
+ output_attentions: bool = False,
549
+ use_cache: bool = False,
550
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
551
+ if output_attentions:
552
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
553
+ logger.warning_once(
554
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
555
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
556
+ )
557
+ return super().forward(
558
+ hidden_states=hidden_states,
559
+ attention_mask=attention_mask,
560
+ position_ids=position_ids,
561
+ past_key_value=past_key_value,
562
+ output_attentions=output_attentions,
563
+ use_cache=use_cache,
564
+ )
565
+
566
+ bsz, q_len, _ = hidden_states.size()
567
+
568
+ query_states = self.q_proj(hidden_states)
569
+ key_states = self.k_proj(hidden_states)
570
+ value_states = self.v_proj(hidden_states)
571
+
572
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
573
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
574
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
575
+
576
+ kv_seq_len = key_states.shape[-2]
577
+ if past_key_value is not None:
578
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
579
+ cos, sin = self.rotary_emb(value_states, position_ids)
580
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
581
+
582
+ if past_key_value is not None:
583
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
584
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
585
+
586
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
587
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
588
+
589
+ if attention_mask is not None:
590
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
591
+ raise ValueError(
592
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
593
+ )
594
+
595
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
596
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
597
+ if query_states.device.type == "cuda" and attention_mask is not None:
598
+ query_states = query_states.contiguous()
599
+ key_states = key_states.contiguous()
600
+ value_states = value_states.contiguous()
601
+
602
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
603
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
604
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
605
+ is_causal = True if self.is_causal and attention_mask is None and q_len > 1 else False
606
+
607
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
608
+ query_states,
609
+ key_states,
610
+ value_states,
611
+ attn_mask=attention_mask,
612
+ dropout_p=self.attention_dropout if self.training else 0.0,
613
+ is_causal=is_causal,
614
+ )
615
+
616
+ attn_output = attn_output.transpose(1, 2).contiguous()
617
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
618
+
619
+ attn_output = self.o_proj(attn_output)
620
+
621
+ return attn_output, None, past_key_value
622
+
623
+
624
+ QWEN2_ATTENTION_CLASSES = {
625
+ "eager": Qwen2Attention,
626
+ "flash_attention_2": Qwen2FlashAttention2,
627
+ "sdpa": Qwen2SdpaAttention,
628
+ }
629
+
630
+
631
+ class Qwen2DecoderLayer(nn.Module):
632
+ def __init__(self, config: Qwen2Config, layer_idx: int):
633
+ super().__init__()
634
+ self.hidden_size = config.hidden_size
635
+
636
+ if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
637
+ logger.warning_once(
638
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
639
+ "unexpected results may be encountered."
640
+ )
641
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
642
+
643
+ self.mlp = Qwen2MLP(config)
644
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
645
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
646
+
647
+ def forward(
648
+ self,
649
+ hidden_states: torch.Tensor,
650
+ attention_mask: Optional[torch.Tensor] = None,
651
+ position_ids: Optional[torch.LongTensor] = None,
652
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
653
+ output_attentions: Optional[bool] = False,
654
+ use_cache: Optional[bool] = False,
655
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
656
+ """
657
+ Args:
658
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
659
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
660
+ `(batch, sequence_length)` where padding elements are indicated by 0.
661
+ output_attentions (`bool`, *optional*):
662
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
663
+ returned tensors for more detail.
664
+ use_cache (`bool`, *optional*):
665
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
666
+ (see `past_key_values`).
667
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
668
+ """
669
+ assert isinstance(
670
+ self.self_attn, Qwen2FlashAttention2
671
+ )
672
+ residual = hidden_states
673
+
674
+ hidden_states = self.input_layernorm(hidden_states)
675
+
676
+ # Self Attention
677
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
678
+ hidden_states=hidden_states,
679
+ attention_mask=attention_mask,
680
+ position_ids=position_ids,
681
+ past_key_value=past_key_value,
682
+ output_attentions=output_attentions,
683
+ use_cache=use_cache,
684
+ )
685
+ hidden_states = residual + hidden_states
686
+
687
+ # Fully Connected
688
+ residual = hidden_states
689
+ hidden_states = self.post_attention_layernorm(hidden_states)
690
+ hidden_states = self.mlp(hidden_states)
691
+ hidden_states = residual + hidden_states
692
+
693
+ outputs = (hidden_states,)
694
+
695
+ if output_attentions:
696
+ outputs += (self_attn_weights,)
697
+
698
+ if use_cache:
699
+ outputs += (present_key_value,)
700
+
701
+ return outputs
702
+
703
+
704
+ QWEN2_START_DOCSTRING = r"""
705
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
706
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
707
+ etc.)
708
+
709
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
710
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
711
+ and behavior.
712
+
713
+ Parameters:
714
+ config ([`Qwen2Config`]):
715
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
716
+ load the weights associated with the model, only the configuration. Check out the
717
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
718
+ """
719
+
720
+
721
+ @add_start_docstrings(
722
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
723
+ QWEN2_START_DOCSTRING,
724
+ )
725
+ class Qwen2PreTrainedModel(PreTrainedModel):
726
+ config_class = Qwen2Config
727
+ base_model_prefix = "model"
728
+ supports_gradient_checkpointing = True
729
+ _no_split_modules = ["Qwen2DecoderLayer"]
730
+ _skip_keys_device_placement = "past_key_values"
731
+ _supports_flash_attn_2 = True
732
+ _supports_sdpa = True
733
+ _supports_cache_class = True
734
+
735
+ def _init_weights(self, module):
736
+ std = self.config.initializer_range
737
+ if isinstance(module, nn.Linear):
738
+ module.weight.data.normal_(mean=0.0, std=std)
739
+ if module.bias is not None:
740
+ module.bias.data.zero_()
741
+ elif isinstance(module, nn.Embedding):
742
+ module.weight.data.normal_(mean=0.0, std=std)
743
+ if module.padding_idx is not None:
744
+ module.weight.data[module.padding_idx].zero_()
745
+
746
+
747
+ QWEN2_INPUTS_DOCSTRING = r"""
748
+ Args:
749
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
750
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
751
+ it.
752
+
753
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
754
+ [`PreTrainedTokenizer.__call__`] for details.
755
+
756
+ [What are input IDs?](../glossary#input-ids)
757
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
758
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
759
+
760
+ - 1 for tokens that are **not masked**,
761
+ - 0 for tokens that are **masked**.
762
+
763
+ [What are attention masks?](../glossary#attention-mask)
764
+
765
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
766
+ [`PreTrainedTokenizer.__call__`] for details.
767
+
768
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
769
+ `past_key_values`).
770
+
771
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
772
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
773
+ information on the default strategy.
774
+
775
+ - 1 indicates the head is **not masked**,
776
+ - 0 indicates the head is **masked**.
777
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
778
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
779
+ config.n_positions - 1]`.
780
+
781
+ [What are position IDs?](../glossary#position-ids)
782
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
783
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
784
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
785
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
786
+
787
+ Two formats are allowed:
788
+ - a [`~cache_utils.Cache`] instance;
789
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
790
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
791
+ cache format.
792
+
793
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
794
+ legacy cache format will be returned.
795
+
796
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
797
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
798
+ of shape `(batch_size, sequence_length)`.
799
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
800
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
801
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
802
+ model's internal embedding lookup matrix.
803
+ use_cache (`bool`, *optional*):
804
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
805
+ `past_key_values`).
806
+ output_attentions (`bool`, *optional*):
807
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
808
+ tensors for more detail.
809
+ output_hidden_states (`bool`, *optional*):
810
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
811
+ more detail.
812
+ return_dict (`bool`, *optional*):
813
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
814
+ """
815
+
816
+
817
+ @add_start_docstrings(
818
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
819
+ QWEN2_START_DOCSTRING,
820
+ )
821
+ class Qwen2Model(Qwen2PreTrainedModel):
822
+ """
823
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
824
+
825
+ Args:
826
+ config: Qwen2Config
827
+ """
828
+
829
+ def __init__(self, config: Qwen2Config):
830
+ super().__init__(config)
831
+ self.padding_idx = config.pad_token_id
832
+ self.vocab_size = config.vocab_size
833
+
834
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
835
+ self.layers = nn.ModuleList(
836
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
837
+ )
838
+ self._attn_implementation = config._attn_implementation
839
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
840
+
841
+ self.gradient_checkpointing = False
842
+ # Initialize weights and apply final processing
843
+ self.post_init()
844
+
845
+ def get_input_embeddings(self):
846
+ return self.embed_tokens
847
+
848
+ def set_input_embeddings(self, value):
849
+ self.embed_tokens = value
850
+
851
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
852
+ def forward(
853
+ self,
854
+ input_ids: torch.LongTensor = None,
855
+ attention_mask: Optional[torch.Tensor] = None,
856
+ position_ids: Optional[torch.LongTensor] = None,
857
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
858
+ inputs_embeds: Optional[torch.FloatTensor] = None,
859
+ use_cache: Optional[bool] = None,
860
+ output_attentions: Optional[bool] = None,
861
+ output_hidden_states: Optional[bool] = None,
862
+ return_dict: Optional[bool] = None,
863
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
864
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
865
+ output_hidden_states = (
866
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
867
+ )
868
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
869
+
870
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
871
+
872
+ # retrieve input_ids and inputs_embeds
873
+ if input_ids is not None and inputs_embeds is not None:
874
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
875
+ elif input_ids is not None:
876
+ batch_size, seq_length = input_ids.shape
877
+ elif inputs_embeds is not None:
878
+ batch_size, seq_length, _ = inputs_embeds.shape
879
+ else:
880
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
881
+
882
+ if self.gradient_checkpointing and self.training:
883
+ if use_cache:
884
+ logger.warning_once(
885
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
886
+ )
887
+ use_cache = False
888
+
889
+ past_key_values_length = 0
890
+
891
+ if use_cache:
892
+ use_legacy_cache = not isinstance(past_key_values, Cache)
893
+ if use_legacy_cache:
894
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
895
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
896
+
897
+ if position_ids is None:
898
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
899
+ position_ids = torch.arange(
900
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
901
+ )
902
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
903
+ else:
904
+ position_ids = position_ids.view(-1, seq_length).long()
905
+
906
+ if inputs_embeds is None:
907
+ inputs_embeds = self.embed_tokens(input_ids)
908
+
909
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
910
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
911
+ if is_padding_right:
912
+ raise ValueError(
913
+ "You are attempting to perform batched generation with padding_side='right'"
914
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
915
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
916
+ )
917
+
918
+ if self._attn_implementation == "flash_attention_2":
919
+ # 2d mask is passed through the layers
920
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
921
+ elif self._attn_implementation == "sdpa" and not output_attentions:
922
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
923
+ # the manual implementation that requires a 4D causal mask in all cases.
924
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
925
+ attention_mask,
926
+ (batch_size, seq_length),
927
+ inputs_embeds,
928
+ past_key_values_length,
929
+ sliding_window=self.config.sliding_window,
930
+ )
931
+ else:
932
+ # 4d mask is passed through the layers
933
+ attention_mask = _prepare_4d_causal_attention_mask(
934
+ attention_mask,
935
+ (batch_size, seq_length),
936
+ inputs_embeds,
937
+ past_key_values_length,
938
+ sliding_window=self.config.sliding_window,
939
+ )
940
+
941
+ hidden_states = inputs_embeds
942
+
943
+ # decoder layers
944
+ all_hidden_states = () if output_hidden_states else None
945
+ all_self_attns = () if output_attentions else None
946
+ next_decoder_cache = None
947
+
948
+ for decoder_layer in self.layers:
949
+ if output_hidden_states:
950
+ all_hidden_states += (hidden_states,)
951
+
952
+ if self.gradient_checkpointing and self.training:
953
+ layer_outputs = self._gradient_checkpointing_func(
954
+ decoder_layer.__call__,
955
+ hidden_states,
956
+ attention_mask,
957
+ position_ids,
958
+ past_key_values,
959
+ output_attentions,
960
+ use_cache,
961
+ )
962
+ else:
963
+ layer_outputs = decoder_layer(
964
+ hidden_states,
965
+ attention_mask=attention_mask,
966
+ position_ids=position_ids,
967
+ past_key_value=past_key_values,
968
+ output_attentions=output_attentions,
969
+ use_cache=use_cache,
970
+ )
971
+
972
+ hidden_states = layer_outputs[0]
973
+
974
+ if use_cache:
975
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
976
+
977
+ if output_attentions:
978
+ all_self_attns += (layer_outputs[1],)
979
+
980
+ hidden_states = self.norm(hidden_states)
981
+
982
+ # add hidden states from the last decoder layer
983
+ if output_hidden_states:
984
+ all_hidden_states += (hidden_states,)
985
+
986
+ next_cache = None
987
+ if use_cache:
988
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
989
+
990
+ if not return_dict:
991
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
992
+ return BaseModelOutputWithPast(
993
+ last_hidden_state=hidden_states,
994
+ past_key_values=next_cache,
995
+ hidden_states=all_hidden_states,
996
+ attentions=all_self_attns,
997
+ )
998
+
999
+
1000
+ class Qwen2ForCausalLM_RingAttn(Qwen2PreTrainedModel):
1001
+ _tied_weights_keys = ["lm_head.weight"]
1002
+
1003
+ def __init__(self, config):
1004
+ super().__init__(config)
1005
+ self.model = Qwen2Model(config)
1006
+ self.vocab_size = config.vocab_size
1007
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1008
+
1009
+ # Initialize weights and apply final processing
1010
+ self.post_init()
1011
+
1012
+ def get_input_embeddings(self):
1013
+ return self.model.embed_tokens
1014
+
1015
+ def set_input_embeddings(self, value):
1016
+ self.model.embed_tokens = value
1017
+
1018
+ def get_output_embeddings(self):
1019
+ return self.lm_head
1020
+
1021
+ def set_output_embeddings(self, new_embeddings):
1022
+ self.lm_head = new_embeddings
1023
+
1024
+ def set_decoder(self, decoder):
1025
+ self.model = decoder
1026
+
1027
+ def get_decoder(self):
1028
+ return self.model
1029
+
1030
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1031
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1032
+ def forward(
1033
+ self,
1034
+ input_ids: torch.LongTensor = None,
1035
+ attention_mask: Optional[torch.Tensor] = None,
1036
+ position_ids: Optional[torch.LongTensor] = None,
1037
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1038
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1039
+ labels: Optional[torch.LongTensor] = None,
1040
+ use_cache: Optional[bool] = None,
1041
+ output_attentions: Optional[bool] = None,
1042
+ output_hidden_states: Optional[bool] = None,
1043
+ return_dict: Optional[bool] = None,
1044
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1045
+ r"""
1046
+ Args:
1047
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1048
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1049
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1050
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1051
+
1052
+ Returns:
1053
+
1054
+ Example:
1055
+
1056
+ ```python
1057
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1058
+
1059
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1060
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1061
+
1062
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1063
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1064
+
1065
+ >>> # Generate
1066
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1067
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1068
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1069
+ ```"""
1070
+
1071
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1072
+ output_hidden_states = (
1073
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1074
+ )
1075
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1076
+
1077
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1078
+ outputs = self.model(
1079
+ input_ids=input_ids,
1080
+ attention_mask=attention_mask,
1081
+ position_ids=position_ids,
1082
+ past_key_values=past_key_values,
1083
+ inputs_embeds=inputs_embeds,
1084
+ use_cache=use_cache,
1085
+ output_attentions=output_attentions,
1086
+ output_hidden_states=output_hidden_states,
1087
+ return_dict=return_dict,
1088
+ )
1089
+
1090
+ hidden_states = outputs[0]
1091
+
1092
+ loss, logits = None, None
1093
+ if labels is not None:
1094
+ # Shift so that tokens < n predict n
1095
+ # shift_labels = labels[:, 1:].to(hidden_states.device)
1096
+ loss = low_mem_cross_ent(hidden_states,
1097
+ self.lm_head.weight,
1098
+ labels.to(hidden_states.device),
1099
+ 16
1100
+ )
1101
+ logits = self.lm_head(hidden_states).float()
1102
+ # loss = None
1103
+ # if labels is not None:
1104
+ # # Shift so that tokens < n predict n
1105
+ # shift_logits = logits[..., :-1, :].contiguous()
1106
+ # shift_labels = labels[..., 1:].contiguous()
1107
+ # # Flatten the tokens
1108
+ # loss_fct = CrossEntropyLoss()
1109
+ # shift_logits = shift_logits.view(-1, self.config.vocab_size)
1110
+ # shift_labels = shift_labels.view(-1)
1111
+ # # Enable model parallelism
1112
+ # shift_labels = shift_labels.to(shift_logits.device)
1113
+ # loss = loss_fct(shift_logits, shift_labels)
1114
+
1115
+ if not return_dict:
1116
+ output = (logits,) + outputs[1:]
1117
+ return (loss,) + output if loss is not None else output
1118
+
1119
+ return CausalLMOutputWithPast(
1120
+ loss=loss,
1121
+ logits=logits,
1122
+ past_key_values=outputs.past_key_values,
1123
+ hidden_states=outputs.hidden_states,
1124
+ attentions=outputs.attentions,
1125
+ )
1126
+
1127
+ def prepare_inputs_for_generation(
1128
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1129
+ ):
1130
+ past_length = 0
1131
+ # Omit tokens covered by past_key_values
1132
+ if past_key_values is not None:
1133
+ # Past key values are always initialized with a `Cache` object -> no need for if-else anymore
1134
+ cache_length = past_key_values.get_seq_length()
1135
+ past_length = past_key_values.seen_tokens
1136
+ max_cache_length = past_key_values.get_max_length()
1137
+
1138
+ # Keep only the unprocessed tokens:
1139
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1140
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1141
+ # input)
1142
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1143
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1144
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1145
+ # input_ids based on the past_length.
1146
+ elif past_length < input_ids.shape[1]:
1147
+ input_ids = input_ids[:, past_length:]
1148
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1149
+
1150
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1151
+ if (
1152
+ max_cache_length is not None
1153
+ and attention_mask is not None
1154
+ and cache_length + input_ids.shape[1] > max_cache_length
1155
+ ):
1156
+ attention_mask = attention_mask[:, -max_cache_length:]
1157
+
1158
+ position_ids = kwargs.get("position_ids", None)
1159
+ if attention_mask is not None and position_ids is None:
1160
+ # create position_ids on the fly for batch generation
1161
+ position_ids = attention_mask.long().cumsum(-1) - 1
1162
+ position_ids.masked_fill_(attention_mask == 0, 1)
1163
+ if past_key_values:
1164
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1165
+
1166
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1167
+ if inputs_embeds is not None and past_length == 0:
1168
+ model_inputs = {"inputs_embeds": inputs_embeds}
1169
+ else:
1170
+ model_inputs = {"input_ids": input_ids}
1171
+
1172
+ model_inputs.update(
1173
+ {
1174
+ "position_ids": position_ids,
1175
+ "past_key_values": past_key_values,
1176
+ "use_cache": kwargs.get("use_cache"),
1177
+ "attention_mask": attention_mask,
1178
+ }
1179
+ )
1180
+ return model_inputs
1181
+
1182
+ @staticmethod
1183
+ def _reorder_cache(past_key_values, beam_idx):
1184
+ reordered_past = ()
1185
+ for layer_past in past_key_values:
1186
+ reordered_past += (
1187
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1188
+ )
1189
+ return reordered_past
1190
+
1191
+
1192
+ @add_start_docstrings(
1193
+ """
1194
+ The Qwen2 Model transformer with a sequence classification head on top (linear layer).
1195
+
1196
+ [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1197
+ (e.g. GPT-2) do.
1198
+
1199
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1200
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1201
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1202
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1203
+ each row of the batch).
1204
+ """,
1205
+ QWEN2_START_DOCSTRING,
1206
+ )
1207
+ class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
1208
+ def __init__(self, config):
1209
+ super().__init__(config)
1210
+ self.num_labels = config.num_labels
1211
+ self.model = Qwen2Model(config)
1212
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1213
+
1214
+ # Initialize weights and apply final processing
1215
+ self.post_init()
1216
+
1217
+ def get_input_embeddings(self):
1218
+ return self.model.embed_tokens
1219
+
1220
+ def set_input_embeddings(self, value):
1221
+ self.model.embed_tokens = value
1222
+
1223
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1224
+ def forward(
1225
+ self,
1226
+ input_ids: torch.LongTensor = None,
1227
+ attention_mask: Optional[torch.Tensor] = None,
1228
+ position_ids: Optional[torch.LongTensor] = None,
1229
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1230
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1231
+ labels: Optional[torch.LongTensor] = None,
1232
+ use_cache: Optional[bool] = None,
1233
+ output_attentions: Optional[bool] = None,
1234
+ output_hidden_states: Optional[bool] = None,
1235
+ return_dict: Optional[bool] = None,
1236
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1237
+ r"""
1238
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1239
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1240
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1241
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1242
+ """
1243
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1244
+
1245
+ transformer_outputs = self.model(
1246
+ input_ids,
1247
+ attention_mask=attention_mask,
1248
+ position_ids=position_ids,
1249
+ past_key_values=past_key_values,
1250
+ inputs_embeds=inputs_embeds,
1251
+ use_cache=use_cache,
1252
+ output_attentions=output_attentions,
1253
+ output_hidden_states=output_hidden_states,
1254
+ return_dict=return_dict,
1255
+ )
1256
+ hidden_states = transformer_outputs[0]
1257
+ logits = self.score(hidden_states)
1258
+
1259
+ if input_ids is not None:
1260
+ batch_size = input_ids.shape[0]
1261
+ else:
1262
+ batch_size = inputs_embeds.shape[0]
1263
+
1264
+ if self.config.pad_token_id is None and batch_size != 1:
1265
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1266
+ if self.config.pad_token_id is None:
1267
+ sequence_lengths = -1
1268
+ else:
1269
+ if input_ids is not None:
1270
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1271
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1272
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1273
+ sequence_lengths = sequence_lengths.to(logits.device)
1274
+ else:
1275
+ sequence_lengths = -1
1276
+
1277
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1278
+
1279
+ loss = None
1280
+ if labels is not None:
1281
+ labels = labels.to(logits.device)
1282
+ if self.config.problem_type is None:
1283
+ if self.num_labels == 1:
1284
+ self.config.problem_type = "regression"
1285
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1286
+ self.config.problem_type = "single_label_classification"
1287
+ else:
1288
+ self.config.problem_type = "multi_label_classification"
1289
+
1290
+ if self.config.problem_type == "regression":
1291
+ loss_fct = MSELoss()
1292
+ if self.num_labels == 1:
1293
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1294
+ else:
1295
+ loss = loss_fct(pooled_logits, labels)
1296
+ elif self.config.problem_type == "single_label_classification":
1297
+ loss_fct = CrossEntropyLoss()
1298
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1299
+ elif self.config.problem_type == "multi_label_classification":
1300
+ loss_fct = BCEWithLogitsLoss()
1301
+ loss = loss_fct(pooled_logits, labels)
1302
+ if not return_dict:
1303
+ output = (pooled_logits,) + transformer_outputs[1:]
1304
+ return ((loss,) + output) if loss is not None else output
1305
+
1306
+ return SequenceClassifierOutputWithPast(
1307
+ loss=loss,
1308
+ logits=pooled_logits,
1309
+ past_key_values=transformer_outputs.past_key_values,
1310
+ hidden_states=transformer_outputs.hidden_states,
1311
+ attentions=transformer_outputs.attentions,
1312
+ )
1313
+
1314
+
1315
+ @add_start_docstrings(
1316
+ """
1317
+ The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1318
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1319
+ """,
1320
+ QWEN2_START_DOCSTRING,
1321
+ )
1322
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2
1323
+ class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
1324
+ def __init__(self, config):
1325
+ super().__init__(config)
1326
+ self.num_labels = config.num_labels
1327
+ self.model = Qwen2Model(config)
1328
+ if getattr(config, "classifier_dropout", None) is not None:
1329
+ classifier_dropout = config.classifier_dropout
1330
+ elif getattr(config, "hidden_dropout", None) is not None:
1331
+ classifier_dropout = config.hidden_dropout
1332
+ else:
1333
+ classifier_dropout = 0.1
1334
+ self.dropout = nn.Dropout(classifier_dropout)
1335
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1336
+
1337
+ # Initialize weights and apply final processing
1338
+ self.post_init()
1339
+
1340
+ def get_input_embeddings(self):
1341
+ return self.model.embed_tokens
1342
+
1343
+ def set_input_embeddings(self, value):
1344
+ self.model.embed_tokens = value
1345
+
1346
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1347
+ def forward(
1348
+ self,
1349
+ input_ids: torch.LongTensor = None,
1350
+ attention_mask: Optional[torch.Tensor] = None,
1351
+ position_ids: Optional[torch.LongTensor] = None,
1352
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1353
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1354
+ labels: Optional[torch.LongTensor] = None,
1355
+ use_cache: Optional[bool] = None,
1356
+ output_attentions: Optional[bool] = None,
1357
+ output_hidden_states: Optional[bool] = None,
1358
+ return_dict: Optional[bool] = None,
1359
+ ) -> Union[Tuple, TokenClassifierOutput]:
1360
+ r"""
1361
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1362
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1363
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1364
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1365
+ """
1366
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1367
+
1368
+ outputs = self.model(
1369
+ input_ids,
1370
+ attention_mask=attention_mask,
1371
+ position_ids=position_ids,
1372
+ past_key_values=past_key_values,
1373
+ inputs_embeds=inputs_embeds,
1374
+ use_cache=use_cache,
1375
+ output_attentions=output_attentions,
1376
+ output_hidden_states=output_hidden_states,
1377
+ return_dict=return_dict,
1378
+ )
1379
+ sequence_output = outputs[0]
1380
+ sequence_output = self.dropout(sequence_output)
1381
+ logits = self.score(sequence_output)
1382
+
1383
+ loss = None
1384
+ if labels is not None:
1385
+ loss_fct = CrossEntropyLoss()
1386
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1387
+
1388
+ if not return_dict:
1389
+ output = (logits,) + outputs[2:]
1390
+ return ((loss,) + output) if loss is not None else output
1391
+
1392
+ return TokenClassifierOutput(
1393
+ loss=loss,
1394
+ logits=logits,
1395
+ hidden_states=outputs.hidden_states,
1396
+ attentions=outputs.attentions,
1397
+ )
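For readers skimming the diff: `Qwen2ForCausalLM_RingAttn.forward` above computes the loss with the repo's `low_mem_cross_ent(hidden_states, self.lm_head.weight, labels, 16)` helper instead of materializing the full logits tensor. The snippet below is a generic, hedged sketch of that chunked cross-entropy idea; it is not the repo's implementation, and label shifting is assumed to be handled by the caller.

```python
import torch
import torch.nn.functional as F


def chunked_cross_entropy(hidden, lm_head_weight, labels, n_chunks=16):
    """Mean token-level CE without ever building the full (batch, seq, vocab) logits."""
    loss_sum = hidden.new_zeros((), dtype=torch.float32)
    n_valid = 0
    for h, y in zip(hidden.chunk(n_chunks, dim=1), labels.chunk(n_chunks, dim=1)):
        logits = h @ lm_head_weight.T                     # (batch, chunk_len, vocab)
        loss_sum = loss_sum + F.cross_entropy(
            logits.flatten(0, 1).float(), y.flatten(),
            reduction="sum", ignore_index=-100,
        )
        n_valid += (y != -100).sum().item()
    return loss_sum / max(n_valid, 1)
```

With 16 chunks, the peak memory of the vocabulary projection drops roughly 16x at the cost of 16 smaller matmuls, which is the trade-off the `low_mem_cross_ent` call above is making.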
vision_niah_d/easy_context/ulysses_attn/__pycache__/monkey_patch.cpython-310.pyc ADDED
Binary file (2.43 kB). View file
 
vision_niah_d/easy_context/ulysses_attn/__pycache__/prepare_inputs.cpython-310.pyc ADDED
Binary file (871 Bytes). View file
 
vision_niah_d/easy_context/ulysses_attn/monkey_patch.py ADDED
@@ -0,0 +1,110 @@
1
+ import transformers
2
+ from typing import List, Optional, Tuple, Union
3
+ import warnings
4
+ import torch
5
+ import torch.utils.checkpoint
6
+ try:
7
+ from yunchang.ulysses import UlyssesAttention
8
+ ulysses_attn = UlyssesAttention()
9
+ except Exception:  # yunchang (Ulysses) is an optional dependency; fall back to None below
10
+ ulysses_attn = None
11
+
12
+
13
+ def new_flash_attn_forward(
14
+ self,
15
+ query_states,
16
+ key_states,
17
+ value_states,
18
+ attention_mask,
19
+ query_length,
20
+ dropout=0.0,
21
+ softmax_scale=None,
22
+ use_sliding_windows=False,
23
+ ):
24
+ if not self._flash_attn_uses_top_left_mask:
25
+ causal = self.is_causal
26
+ else:
27
+ causal = self.is_causal and query_length != 1
28
+
29
+ # Contains at least one padding token in the sequence
30
+ assert attention_mask is None
31
+ assert causal is True
32
+ assert use_sliding_windows is False
33
+ attn_output = ulysses_attn(
34
+ query_states,
35
+ key_states,
36
+ value_states,
37
+ dropout,
38
+ softmax_scale,
39
+ causal=causal,
40
+ )
41
+
42
+ return attn_output
43
+
44
+
45
+ def new_decoder_forward(
46
+ self,
47
+ hidden_states: torch.Tensor,
48
+ attention_mask: Optional[torch.Tensor] = None,
49
+ position_ids: Optional[torch.LongTensor] = None,
50
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
51
+ output_attentions: Optional[bool] = False,
52
+ use_cache: Optional[bool] = False,
53
+ cache_position: Optional[torch.LongTensor] = None,
54
+ **kwargs,
55
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
56
+ assert isinstance(
57
+ self.self_attn, transformers.models.llama.modeling_llama.LlamaFlashAttention2
58
+ ) or isinstance(
59
+ self.self_attn,
60
+ transformers.models.mistral.modeling_mistral.MistralFlashAttention2,
61
+ ), "Please toggle on the Flash Attention 2 implementation when using the Ulysses attention monkey patch."
62
+
63
+ if "padding_mask" in kwargs:
64
+ warnings.warn(
65
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
66
+ )
67
+
68
+ residual = hidden_states
69
+
70
+ hidden_states = self.input_layernorm(hidden_states)
71
+
72
+ # Self Attention
73
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
74
+ hidden_states=hidden_states,
75
+ attention_mask=attention_mask,
76
+ position_ids=position_ids,
77
+ past_key_value=past_key_value,
78
+ output_attentions=output_attentions,
79
+ use_cache=use_cache,
80
+ cache_position=cache_position,
81
+ **kwargs,
82
+ )
83
+ hidden_states = residual + hidden_states
84
+
85
+ # Fully Connected
86
+ residual = hidden_states
87
+ hidden_states = self.post_attention_layernorm(hidden_states)
88
+ hidden_states = self.mlp(hidden_states)
89
+ hidden_states = residual + hidden_states
90
+
91
+ outputs = (hidden_states,)
92
+
93
+ if output_attentions:
94
+ outputs += (self_attn_weights,)
95
+
96
+ if use_cache:
97
+ outputs += (present_key_value,)
98
+
99
+ return outputs
100
+
101
+
102
+ def apply_ulysses_attn_monkey_patch_llama():
103
+ transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward = (
104
+ new_flash_attn_forward
105
+ )
106
+ transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward = (
107
+ new_decoder_forward
108
+ )
109
+
110
+
vision_niah_d/easy_context/ulysses_attn/prepare_inputs.py ADDED
@@ -0,0 +1,45 @@
1
+ import torch
2
+
3
+
4
+ def extract_local(value, rank, world_size, device, dim=1):
5
+ dimension_size = value.shape[dim]
6
+ sub_seq_length = dimension_size // world_size
7
+
8
+ sub_seq_start = rank * sub_seq_length
9
+ sub_seq_end = (rank + 1) * sub_seq_length
10
+ local_value = value[:, sub_seq_start:sub_seq_end]  # note: this slice assumes dim == 1 (the sequence dimension)
11
+
12
+ return local_value.to(device)
13
+
14
+
15
+ def prepare_ulysses_attn_inputs(
16
+ input_ids, position_ids, target_ids, rank, world_size, device
17
+ ):
18
+
19
+ local_input_ids = extract_local(
20
+ input_ids,
21
+ rank,
22
+ world_size,
23
+ device,
24
+ )
25
+ local_position_ids = extract_local(
26
+ position_ids,
27
+ rank,
28
+ world_size,
29
+ device,
30
+ )
31
+
32
+ if target_ids is not None:
33
+ local_target_ids = extract_local(
34
+ target_ids,
35
+ rank,
36
+ world_size,
37
+ device,
38
+ )
39
+ else:
40
+ local_target_ids = None
41
+ return {
42
+ "local_input_ids": local_input_ids,
43
+ "local_position_ids": local_position_ids,
44
+ "local_target_ids": local_target_ids,
45
+ }
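A small, self-contained sketch of how `prepare_ulysses_attn_inputs` (added above) slices a sequence into contiguous per-rank shards. The import path and the toy 16-token batch are assumptions for illustration, not part of the repo.

```python
import torch
from easy_context.ulysses_attn.prepare_inputs import prepare_ulysses_attn_inputs  # assumed import path

world_size = 4
input_ids = torch.arange(16).unsqueeze(0)      # (1, 16) toy sequence
position_ids = torch.arange(16).unsqueeze(0)

for rank in range(world_size):
    shard = prepare_ulysses_attn_inputs(input_ids, position_ids, None, rank, world_size, "cpu")
    print(rank, shard["local_input_ids"].tolist())
# rank r receives the contiguous slice [r * 4, (r + 1) * 4)
```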
vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/__pycache__/monkey_patch.cpython-310.pyc ADDED
Binary file (2.41 kB). View file
 
vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/monkey_patch.py ADDED
@@ -0,0 +1,94 @@
1
+ # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+ import transformers
17
+ import inspect
18
+
19
+
20
+ class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function):
21
+ """
22
+ Saves VRAM by smartly offloading to RAM.
23
+ Tiny hit to performance, since we mask the movement via non blocking calls.
24
+ """
25
+
26
+ @staticmethod
27
+ @torch.cuda.amp.custom_fwd
28
+ def forward(ctx, forward_function, hidden_states, *args):
29
+ saved_hidden_states = hidden_states.to("cpu", non_blocking=True)
30
+ with torch.no_grad():
31
+ output = forward_function(hidden_states, *args)
32
+ ctx.save_for_backward(saved_hidden_states)
33
+ ctx.forward_function = forward_function
34
+ ctx.args = args
35
+
36
+ return output
37
+
38
+ pass
39
+
40
+ @staticmethod
41
+ @torch.cuda.amp.custom_bwd
42
+ def backward(ctx, dY):
43
+ (hidden_states,) = ctx.saved_tensors
44
+ hidden_states = hidden_states.to("cuda", non_blocking=True).detach()
45
+ hidden_states.requires_grad = True
46
+ with torch.enable_grad():
47
+ (output,) = ctx.forward_function(hidden_states, *ctx.args)
48
+ torch.autograd.backward(output, dY)
49
+ return (
50
+ None,
51
+ hidden_states.grad,
52
+ ) + (
53
+ None,
54
+ ) * len(ctx.args)
55
+
56
+ pass
57
+
58
+
59
+ pass
60
+
61
+
62
+ def new_gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
63
+ assert gradient_checkpointing_kwargs == None
64
+ if not self.supports_gradient_checkpointing:
65
+ raise ValueError(
66
+ f"{self.__class__.__name__} does not support gradient checkpointing."
67
+ )
68
+
69
+ gradient_checkpointing_func = Unsloth_Offloaded_Gradient_Checkpointer.apply
70
+ # For old GC format (transformers < 4.35.0) for models that live on the Hub
71
+ # we will fall back to the overwritten `_set_gradient_checkpointing` method
72
+ _is_using_old_format = (
73
+ "value" in inspect.signature(self._set_gradient_checkpointing).parameters
74
+ )
75
+
76
+ if not _is_using_old_format:
77
+ self._set_gradient_checkpointing(
78
+ enable=True, gradient_checkpointing_func=gradient_checkpointing_func
79
+ )
80
+ else:
81
+ raise NotImplementedError()
82
+
83
+ if getattr(self, "_hf_peft_config_loaded", False):
84
+ # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True
85
+ # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334
86
+ # When training with PEFT, only LoRA layers will have requires grad set to True, but the output of frozen layers need to propagate
87
+ # the gradients to make sure the gradient flows.
88
+ self.enable_input_require_grads()
89
+
90
+
91
+ def apply_unsloth_offloaded_gradient_checkpoint_monkey_patch():
92
+ transformers.modeling_utils.PreTrainedModel.gradient_checkpointing_enable = (
93
+ new_gradient_checkpointing_enable
94
+ )
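A minimal usage sketch, under the assumption that the module is importable under the path below and that the checkpoint name is a placeholder; the patch simply swaps `gradient_checkpointing_enable` for the offloaded variant defined above.

```python
from transformers import AutoModelForCausalLM
from easy_context.unsloth_offloaded_gradient_checkpoint.monkey_patch import (  # assumed import path
    apply_unsloth_offloaded_gradient_checkpoint_monkey_patch,
)

apply_unsloth_offloaded_gradient_checkpoint_monkey_patch()      # patch PreTrainedModel
model = AutoModelForCausalLM.from_pretrained("your/causal-lm")  # hypothetical checkpoint
model.gradient_checkpointing_enable()                           # now offloads activations to CPU RAM
```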
vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/monkey_patch.cpython-310.pyc ADDED
Binary file (2.62 kB). View file
 
vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/prepare_inputs.cpython-310.pyc ADDED
Binary file (867 Bytes). View file
 
vision_niah_d/easy_context/zigzag_ring_attn/monkey_patch.py ADDED
@@ -0,0 +1,113 @@
1
+ import transformers
2
+ from typing import List, Optional, Tuple, Union
3
+ import warnings
4
+ import torch
5
+ import torch.utils.checkpoint
6
+ from ring_flash_attn.zigzag_ring_flash_attn import zigzag_ring_flash_attn_func
7
+
8
+
9
+ def new_flash_attn_forward(
10
+ self,
11
+ query_states,
12
+ key_states,
13
+ value_states,
14
+ attention_mask,
15
+ query_length,
16
+ dropout=0.0,
17
+ softmax_scale=None,
18
+ use_sliding_windows=False,
19
+ ):
20
+ if not self._flash_attn_uses_top_left_mask:
21
+ causal = self.is_causal
22
+ else:
23
+ causal = self.is_causal and query_length != 1
24
+
25
+ # Contains at least one padding token in the sequence
26
+ assert attention_mask is None
27
+ assert causal is True
28
+ assert use_sliding_windows is False
29
+ attn_output = zigzag_ring_flash_attn_func(
30
+ query_states,
31
+ key_states,
32
+ value_states,
33
+ dropout,
34
+ softmax_scale,
35
+ causal=causal,
36
+ )
37
+
38
+ return attn_output
39
+
40
+
41
+ def new_decoder_forward(
42
+ self,
43
+ hidden_states: torch.Tensor,
44
+ attention_mask: Optional[torch.Tensor] = None,
45
+ position_ids: Optional[torch.LongTensor] = None,
46
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
47
+ output_attentions: Optional[bool] = False,
48
+ use_cache: Optional[bool] = False,
49
+ cache_position: Optional[torch.LongTensor] = None,
50
+ **kwargs,
51
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
52
+ assert isinstance(
53
+ self.self_attn, transformers.models.llama.modeling_llama.LlamaFlashAttention2
54
+ ) or isinstance(
55
+ self.self_attn,
56
+ transformers.models.mistral.modeling_mistral.MistralFlashAttention2,
57
+ ), "Please toggle on the Flash Attention 2 implementation when using zigzag ring attention monkey patch."
58
+
59
+ if "padding_mask" in kwargs:
60
+ warnings.warn(
61
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
62
+ )
63
+
64
+ residual = hidden_states
65
+
66
+ hidden_states = self.input_layernorm(hidden_states)
67
+
68
+ # Self Attention
69
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
70
+ hidden_states=hidden_states,
71
+ attention_mask=attention_mask,
72
+ position_ids=position_ids,
73
+ past_key_value=past_key_value,
74
+ output_attentions=output_attentions,
75
+ use_cache=use_cache,
76
+ cache_position=cache_position,
77
+ **kwargs,
78
+ )
79
+ hidden_states = residual + hidden_states
80
+
81
+ # Fully Connected
82
+ residual = hidden_states
83
+ hidden_states = self.post_attention_layernorm(hidden_states)
84
+ hidden_states = self.mlp(hidden_states)
85
+ hidden_states = residual + hidden_states
86
+
87
+ outputs = (hidden_states,)
88
+
89
+ if output_attentions:
90
+ outputs += (self_attn_weights,)
91
+
92
+ if use_cache:
93
+ outputs += (present_key_value,)
94
+
95
+ return outputs
96
+
97
+
98
+ def apply_zigzag_ring_attn_monkey_patch_llama():
99
+ transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward = (
100
+ new_flash_attn_forward
101
+ )
102
+ transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward = (
103
+ new_decoder_forward
104
+ )
105
+
106
+
107
+ def apply_zigzag_ring_attn_monkey_patch_mistral():
108
+ transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward = (
109
+ new_flash_attn_forward
110
+ )
111
+ transformers.models.mistral.modeling_mistral.MistralDecoderLayer.forward = (
112
+ new_decoder_forward
113
+ )
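A minimal usage sketch (the import path and checkpoint name are illustrative): apply the patch once per process before running a forward pass, and load the model with Flash Attention 2 so the patched `LlamaFlashAttention2` / `LlamaDecoderLayer` code paths are exercised. Real use additionally requires an initialized `torch.distributed` process group and inputs sharded with the zigzag helper below.

```python
from transformers import AutoModelForCausalLM
from easy_context.zigzag_ring_attn.monkey_patch import apply_zigzag_ring_attn_monkey_patch_llama  # assumed path

apply_zigzag_ring_attn_monkey_patch_llama()
model = AutoModelForCausalLM.from_pretrained(
    "your/llama-checkpoint",                  # hypothetical checkpoint
    attn_implementation="flash_attention_2",  # required by the assert in new_decoder_forward
)
```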
vision_niah_d/easy_context/zigzag_ring_attn/prepare_inputs.py ADDED
@@ -0,0 +1,41 @@
1
+ import torch
2
+
3
+
4
+ def extract_local(value, rank, world_size, device, dim=1):
5
+ value_chunks = value.chunk(2 * world_size, dim=dim)
6
+ local_value = torch.cat(
7
+ [value_chunks[rank], value_chunks[2 * world_size - rank - 1]], dim=dim
8
+ )
9
+ return local_value.to(device)
10
+
11
+
12
+ def prepare_zigzag_ring_attn_inputs(
13
+ input_ids, position_ids, target_ids, rank, world_size, device
14
+ ):
15
+ local_input_ids = extract_local(
16
+ input_ids,
17
+ rank,
18
+ world_size,
19
+ device,
20
+ )
21
+ local_position_ids = extract_local(
22
+ position_ids,
23
+ rank,
24
+ world_size,
25
+ device,
26
+ dim=2
27
+ )
28
+ if target_ids is not None:
29
+ local_target_ids = extract_local(
30
+ target_ids,
31
+ rank,
32
+ world_size,
33
+ device,
34
+ )
35
+ else:
36
+ local_target_ids = None
37
+ return {
38
+ "local_input_ids": local_input_ids,
39
+ "local_position_ids": local_position_ids,
40
+ "local_target_ids": local_target_ids,
41
+ }
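A short standalone sketch of the zigzag split performed by `extract_local` above: the sequence is cut into `2 * world_size` chunks and rank `r` keeps chunks `r` and `2 * world_size - 1 - r`, so every rank holds one early and one late chunk and the causal-attention workload stays balanced across ranks.

```python
import torch

world_size = 2
seq = torch.arange(8).unsqueeze(0)                 # (1, 8) -> chunks [0,1] [2,3] [4,5] [6,7]
chunks = seq.chunk(2 * world_size, dim=1)
for rank in range(world_size):
    local = torch.cat([chunks[rank], chunks[2 * world_size - rank - 1]], dim=1)
    print(rank, local.tolist())
# rank 0 -> [[0, 1, 6, 7]]   rank 1 -> [[2, 3, 4, 5]]
```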
vision_niah_d/eval_debug.sh ADDED
@@ -0,0 +1,47 @@
+ #!/bin/bash
+ set -x
+ 
+ models=(
+     # "/mnt/petrelfs/weixilin/cache/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video"
+     # "/mnt/petrelfs/weixilin/cache/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video"
+     "/mnt/petrelfs/weixilin/cache/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video"
+     "/mnt/petrelfs/weixilin/cache/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video"
+     # "/mnt/petrelfs/weixilin/cache/Qwen2-VL-t_only-128frames-16card_8k-context-330k-llava-video"
+ )
+ rope_types=(
+     # "t_scale2_change_freq"
+     "vanilla_rope"
+     "time_rope"
+     # "t_only"
+ )
+ 
+ base_port=6015
+ 
+ for i in "${!models[@]}"; do
+     model=${models[$i]}
+     rope_type=${rope_types[$i]}
+ 
+     port=$((base_port + i))
+ 
+     echo "evaluating model: $model"
+     echo "using rope_type: $rope_type"
+     echo "port: $port"
+ 
+     accelerate launch --num_processes 8 --config_file vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml \
+         --main_process_port "$port" vision_niah_d/eval_vision_niah.py \
+         --model "$model" \
+         --needle_dataset vision_niah_d/needle_datasets/dataset.json \
+         --needle_embedding_dir vision_niah_d/video_needle_haystack/data/needle_qwen2_embeddings_144tokens_dataset \
+         --haystack_dir vision_niah_d/video_needle_haystack/data/haystack_qwen2_embeddings_6000frames \
+         --prompt_template qwen2 \
+         --max_frame_num 3000 \
+         --min_frame_num 100 \
+         --frame_interval 200 \
+         --output_path vision_niah_d/niah_output/ \
+         --rope_type "$rope_type" \
+         --image_tokens 144 \
+         --depth_interval 0.2
+ 
+     echo "evaluation of model $model is done."
+     echo "------------------------------------"
+ done
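Note: a quick sketch of the sweep size these arguments imply; it simply mirrors the two loops in eval_vision_niah.py (frame counts from --min_frame_num to --max_frame_num in steps of --frame_interval, depths from 0 to 1 in steps of --depth_interval). The constants below are copied from the script above.

import numpy as np

# Values as passed by eval_debug.sh above.
min_frame_num, max_frame_num, frame_interval = 100, 3000, 200
depth_interval = 0.2

frame_counts = list(range(min_frame_num, max_frame_num + 1, frame_interval))
depths = np.arange(0, 1 + depth_interval, depth_interval)

# 15 frame settings x 6 depths = 90 (frame count, depth) cells per needle sample.
print(len(frame_counts), "x", len(depths), "=", len(frame_counts) * len(depths))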
vision_niah_d/eval_debug_interrupt.sh ADDED
@@ -0,0 +1,44 @@
+ #!/bin/bash
+ set -x
+ 
+ models=(
+     "/mnt/petrelfs/weixilin/cache/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video"
+ )
+ rope_types=(
+     # "m_rope"
+     "videorope"
+ )
+ 
+ # base port
+ base_port=6011
+ 
+ # iterate over each model
+ for i in "${!models[@]}"; do
+     model=${models[$i]}
+     rope_type=${rope_types[$i]}
+ 
+     port=$((base_port + i))
+ 
+     echo "evaluating model: $model"
+     echo "using rope_type: $rope_type"
+     echo "port: $port"
+ 
+     accelerate launch --num_processes 8 --config_file vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml \
+         --main_process_port "$port" vision_niah_d/eval_vision_niah_interrupt.py \
+         --model "$model" \
+         --needle_dataset vision_niah_d/needle_datasets/dataset.json \
+         --needle_embedding_dir vision_niah_d/video_needle_haystack/data/needle_qwen2_embeddings_144tokens_dataset \
+         --needle_embedding_interrupt_dir vision_niah_d/video_needle_haystack/data/needle_qwen2_embeddings_144tokens_dataset_interrupt \
+         --haystack_dir vision_niah_d/video_needle_haystack/data/haystack_qwen2_embeddings_6000frames \
+         --prompt_template qwen2 \
+         --max_frame_num 3000 \
+         --min_frame_num 100 \
+         --frame_interval 200 \
+         --output_path vision_niah_d/niah_output_interrupt \
+         --rope_type "$rope_type" \
+         --image_tokens 144 \
+         --depth_interval 0.2
+ 
+     echo "evaluation of model $model is done."
+     echo "------------------------------------"
+ done
vision_niah_d/eval_vision_niah.py ADDED
@@ -0,0 +1,552 @@
1
+ import argparse
2
+ import gc
3
+ import sys
4
+ import torch
5
+ from transformers import AutoTokenizer
6
+ from transformers import LlamaForCausalLM
7
+ from easy_context import Qwen2ForCausalLM_RingAttn
8
+ from tqdm import tqdm
9
+ from accelerate import Accelerator
10
+ import glob
11
+ import numpy as np
12
+ from tqdm import tqdm
13
+ import gc
14
+ import matplotlib.pyplot as plt
15
+ import os
16
+ from matplotlib.colors import LinearSegmentedColormap
17
+ import seaborn as sns
18
+ import pandas as pd
19
+ from pathlib import Path
20
+ import random
21
+ import json
22
+ from datasets import load_dataset
23
+ from vision_niah.produce_needle_embedding import read_json_file
24
+ from easy_context import (
25
+ prepare_seq_parallel_inputs,
26
+ apply_seq_parallel_monkey_patch,
27
+ )
28
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
29
+ from torchvision import io, transforms
30
+ from torchvision.transforms import InterpolationMode
31
+ apply_seq_parallel_monkey_patch("zigzag_ring_attn", "llama")
32
+
33
+ import sys
34
+ import pdb
35
+ class ForkedPdb(pdb.Pdb):
36
+ """A Pdb subclass that may be used
37
+ from a forked multiprocessing child
38
+ """
39
+ def interaction(self, *args, **kwargs):
40
+ _stdin = sys.stdin
41
+ try:
42
+ sys.stdin = open('/dev/stdin')
43
+ pdb.Pdb.interaction(self, *args, **kwargs)
44
+ finally:
45
+ sys.stdin = _stdin
46
+
47
+ SEED = 24242424
48
+ torch.manual_seed(SEED)
49
+ random.seed(SEED)
50
+ np.random.seed(SEED)
51
+ IMAGE_TOKENS = None
52
+ prompt_templates = {
53
+ "mistral": {
54
+ "preprompt": "<s>[INST]",
55
+ "postprompt": " [/INST]"
56
+ },
57
+ "vicuna": {
58
+ "preprompt": "<s>A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER:",
59
+ "postprompt": "ASSISTANT:"
60
+ },
61
+ "llama3": {
62
+ "preprompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n",
63
+ "postprompt": "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
64
+ },
65
+ "qwen2": {
66
+ "preprompt": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n",
67
+ "postprompt": "<|im_end|>\n<|im_start|>assistant\n",
68
+ },
69
+ "yi": {
70
+ "preprompt": "<|im_start|>system\nAnswer the questions.<|im_end|>\n<|im_start|>user\n",
71
+ "postprompt": "<|im_end|>\n<|im_start|>assistant\n",
72
+ },
73
+ }
74
+ # \nAnswer the question using a single word or phrase.
75
+ # The color of the bottle cap is
76
+ # answer = "Yellow"
77
+
78
+
79
+ def safe_tokenize(tokenizer, text):
80
+ tokenized = tokenizer.encode(text, return_tensors="pt")
81
+ if tokenizer.bos_token != None and len(tokenized) > 0 and tokenized[0, 0] == tokenizer.bos_token_id:
82
+ tokenized = tokenized[:, 1:]
83
+ return tokenized
84
+
85
+ def get_vanilla_rope_index(input_embeds, video_se):
86
+ return torch.arange(input_embeds.shape[1]).view(1, 1, -1).expand(3, 1, -1)
87
+
88
+ def get_time_rope_index(input_embeds, video_se):
89
+ llm_pos_ids_list = []
90
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
91
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
92
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
93
+ ## time rope
94
+ t_index = torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + nframes).repeat_interleave(IMAGE_TOKENS, dim=0).view(1, 1, -1).expand(3, 1, -1)
95
+ llm_pos_ids_list.append(t_index)
96
+ if input_embeds.shape[1] > video_se[1]:
97
+ text_len = input_embeds.shape[1] - video_se[1]
98
+ llm_pos_ids_list.append(torch.arange(t_index.max().item() + 1, text_len + t_index.max().item() + 1).view(1, 1, -1).expand(3, 1, -1))
99
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
100
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
101
+ return position_ids
102
+
103
+ def get_t_scale2_rope_index(input_embeds, video_se, scale_factor):
104
+ llm_pos_ids_list = []
105
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
106
+ st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0
107
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
108
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
109
+ ## m_rope rope
110
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
111
+
112
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
113
+ -1, llm_grid_h * llm_grid_w).flatten()
114
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
115
+ llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2
116
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
117
+ llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2
118
+ t_index = t_index * scale_factor
119
+ t_index = t_index + st_idx
120
+ h_index = h_index + t_index
121
+ w_index = w_index + t_index
122
+
123
+ llm_pos_ids_list.append(
124
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1))
125
+
126
+ if input_embeds.shape[1] > video_se[1]:
127
+ text_len = input_embeds.shape[1] - video_se[1]
128
+ # print(text_len)
129
+
130
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
131
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
132
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
133
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
134
+ return position_ids
135
+
136
+ def get_m_rope_index(input_embeds, video_se):
137
+ llm_pos_ids_list = []
138
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
139
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
140
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
141
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
142
+ ## m_rope rope
143
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
144
+ t_index = torch.arange(nframes).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
145
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
146
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
147
+ llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx)
148
+ if input_embeds.shape[1] > video_se[1]:
149
+ text_len = input_embeds.shape[1] - video_se[1]
150
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
151
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
152
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
153
+ return position_ids
154
+
155
+ def get_m_modify_margin_index(input_embeds, video_se):
156
+ llm_pos_ids_list = []
157
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
158
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
159
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
160
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
161
+ ## m_rope rope
162
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
163
+
164
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
165
+ -1, llm_grid_h * llm_grid_w).flatten()
166
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
167
+ llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2
168
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
169
+ llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2
170
+ t_index = t_index + st_idx
171
+ h_index = h_index + t_index
172
+ w_index = w_index + t_index
173
+
174
+ llm_pos_ids_list.append(
175
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1))
176
+
177
+ if input_embeds.shape[1] > video_se[1]:
178
+ text_len = input_embeds.shape[1] - video_se[1]
179
+ # print(text_len)
180
+
181
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
182
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
183
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
184
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
185
+ return position_ids
186
+
187
+ def get_m_modify_no_center_index(input_embeds, video_se):
188
+ llm_pos_ids_list = []
189
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
190
+ st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0
191
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
192
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
193
+ ## m_rope rope
194
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
195
+
196
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
197
+ -1, llm_grid_h * llm_grid_w).flatten()
198
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
199
+ llm_grid_t, -1, llm_grid_w).flatten()
200
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
201
+ llm_grid_t, llm_grid_h, -1).flatten()
202
+
203
+ llm_pos_ids_list.append(
204
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx)
205
+
206
+ if input_embeds.shape[1] > video_se[1]:
207
+ text_len = input_embeds.shape[1] - video_se[1]
208
+ # print(text_len)
209
+
210
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
211
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
212
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
213
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
214
+ return position_ids
215
+
216
+ def get_m_modify_index(input_embeds, video_se):
217
+ llm_pos_ids_list = []
218
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
219
+ st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0
220
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
221
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
222
+ ## m_rope rope
223
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
224
+
225
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
226
+ -1, llm_grid_h * llm_grid_w).flatten()
227
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
228
+ llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2
229
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
230
+ llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2
231
+ t_index = t_index + st_idx
232
+ h_index = h_index + t_index
233
+ w_index = w_index + t_index
234
+
235
+ llm_pos_ids_list.append(
236
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1))
237
+
238
+ if input_embeds.shape[1] > video_se[1]:
239
+ text_len = input_embeds.shape[1] - video_se[1]
240
+ # print(text_len)
241
+
242
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
243
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
244
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
245
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
246
+ return position_ids
247
+
248
+ def get_position_ids(input_embeds, rope_type, video_se):
249
+ if rope_type == 'vanilla_rope':
250
+ return get_vanilla_rope_index(input_embeds, video_se)
251
+ elif rope_type == 'tad_rope':
252
+ return get_time_rope_index(input_embeds, video_se) + get_vanilla_rope_index(input_embeds, video_se)
253
+ elif rope_type == 'm_rope':
254
+ return get_m_rope_index(input_embeds, video_se)
255
+ elif rope_type == 'videorope':
256
+ scale_factor = 2.0
257
+ return get_t_scale2_rope_index(input_embeds, video_se, scale_factor)
258
+ else:
259
+ raise ValueError(f"not this rope: {rope_type}")
260
+
261
+ # answer = "more bet"
262
+ def eval_forward(args, video_se, accelerator, model, input_embeds, answer_embeds, pad_id, answer_ids, tokenizer):
263
+ # first append answer_embeds to input_embeds
264
+ prompt_length = input_embeds.shape[1]
265
+ labels_length = answer_embeds.shape[1]
266
+ input_embeds = torch.cat([input_embeds, answer_embeds], dim=1)
267
+ # second pad input_embeds to the multiple of accelerator.num_processes
268
+ pad_tensor = torch.tensor(
269
+ [pad_id]
270
+ * (
271
+ (accelerator.num_processes * 2)
272
+ - input_embeds.shape[1] % (accelerator.num_processes * 2)
273
+ )
274
+ ).unsqueeze(0).unsqueeze(-1).expand(-1, -1, input_embeds.shape[-1]).to(accelerator.device)
275
+ input_embeds = torch.cat([input_embeds, pad_tensor], dim=1)
276
+ # position_ids = (
277
+ # torch.arange(input_embeds.shape[1]).unsqueeze(0).expand(input_embeds.shape[0], -1)
278
+ # ).to(accelerator.device)
279
+ position_ids = get_position_ids(input_embeds, args.rope_type, video_se)
280
+ # ForkedPdb().set_trace()
281
+ accelerator.print(input_embeds.shape)
282
+ prepared = prepare_seq_parallel_inputs(
283
+ "zigzag_ring_attn",
284
+ input_embeds,
285
+ position_ids,
286
+ None,
287
+ accelerator.process_index,
288
+ accelerator.num_processes,
289
+ accelerator.device,
290
+ )
291
+ local_input_embeds = prepared["local_input_ids"]
292
+ local_position_ids = prepared["local_position_ids"]
293
+ if 'm_modify' in args.rope_type or 't_only' in args.rope_type or 'change_freq' in args.rope_type:
294
+ from transformers.models.qwen2_vl import modeling_qwen2_vl
295
+ modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = modeling_qwen2_vl.apply_m_modify_multimodal_rotary_pos_emb
296
+ with torch.inference_mode():
297
+ hidden_states = model.model(
298
+ inputs_embeds=local_input_embeds,
299
+ position_ids=local_position_ids,
300
+ use_cache=False,
301
+ )[0]
302
+ logits = model.lm_head(hidden_states)
303
+ logits = logits.float()
304
+
305
+ pred = logits.argmax(dim=-1)
306
+
307
+ # gather all logits using accelerator.gather
308
+ def undo_extract_local(gathered_value, world_size, dim=1):
309
+ value_chunks = gathered_value.chunk(2 * world_size, dim=dim)
310
+ reordered_chunks = [None] * (2 * world_size)
311
+ for i in range(world_size):
312
+ reordered_chunks[i] = value_chunks[i * 2]
313
+ reordered_chunks[2 * world_size - i - 1] = value_chunks[i * 2 + 1]
314
+ return torch.cat(reordered_chunks, dim=dim)
315
+
316
+ correct = False
317
+
318
+ gathered_logits = accelerator.gather(pred.squeeze(0)).unsqueeze(0)
319
+ # undo extract local on the gathered logits
320
+ # ForkedPdb().set_trace()
321
+ pred = undo_extract_local(gathered_logits, accelerator.num_processes)
322
+ pred = pred[:, prompt_length - 1 : prompt_length + labels_length - 1]
323
+ # check if the logits are correct, extract argmax id
324
+ # compare the predicted_ids with the labels
325
+ correct = (pred == answer_ids.to(accelerator.device)).all()
326
+ if accelerator.is_main_process:
327
+ print(
328
+ "Predicted: ",
329
+ tokenizer.decode(pred.squeeze().tolist()),
330
+ "Answer: ",
331
+ tokenizer.decode(answer_ids.squeeze().tolist()),
332
+ )
333
+ # print id as well
334
+ print(
335
+ "Predicted: ",
336
+ pred.squeeze().tolist(),
337
+ "Answer: ",
338
+ answer_ids.squeeze().tolist(),
339
+ )
340
+ return int(correct)
341
+
342
+
343
+ def load_haystack(args, accelerator):
344
+ haystack_embeddings = torch.load(f"{args.haystack_dir}/video_embeddings.pt").to(torch.bfloat16)
345
+
346
+ return haystack_embeddings
347
+
348
+ def load_text_embeddings(str, tokenizer, model, accelerator, replace_double_newline=False):
349
+ token_ids = safe_tokenize(tokenizer, str)
350
+ def replace_double_newline_func(token_ids):
351
+ # substitute token id 271 with two 198 tokens
352
+ # for example:
353
+ # from: tensor([[128000, 128006, 9125, 128007, 271, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]])
354
+ # to: tensor([[128000, 128006, 9125, 128007, 198, 198, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]])
355
+ # length will increase by number of 271
356
+ double_newline_loc = (token_ids == 271).nonzero()[:, 1]
357
+ double_newline_loc += torch.arange(len(double_newline_loc))
358
+ if len(double_newline_loc) > 0:
359
+ for loc in double_newline_loc:
360
+ token_ids = torch.cat([token_ids[:, :loc], torch.tensor([[198, 198]]), token_ids[:, loc+1:]], dim=1)
361
+ return token_ids
362
+ if replace_double_newline:
363
+ token_ids = replace_double_newline_func(token_ids)
364
+ token_ids = token_ids.to(accelerator.device)
365
+ with torch.inference_mode():
366
+ embeddings = model.model.embed_tokens(token_ids)
367
+ return embeddings.to(torch.bfloat16)
368
+
369
+ def inference(args):
370
+ accelerator = Accelerator(
371
+ mixed_precision="bf16",
372
+ )
373
+ model_path = args.model
374
+ model = Qwen2VLForConditionalGeneration.from_pretrained(model_path,
375
+ device_map=accelerator.device,
376
+ torch_dtype=torch.bfloat16,
377
+ attn_implementation="flash_attention_2"
378
+ )
379
+ del model.visual
380
+ processor = AutoProcessor.from_pretrained("/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct")
381
+ tokenizer = processor.tokenizer
382
+
383
+
384
+ kwargs = {"rope_theta": args.rope_theta} if args.rope_theta is not None else {}
385
+ tokenizer.pad_token = tokenizer.eos_token
386
+ # remember to remove <s>
387
+ accelerator.print("Preparing Haystack...")
388
+ haystack_embeddings = load_haystack(args, accelerator)
389
+ target_length = args.max_frame_num * IMAGE_TOKENS
390
+ # ForkedPdb().set_trace()
391
+ if len(haystack_embeddings) < target_length:
392
+ repeat_times = (target_length + len(haystack_embeddings) - 1) // len(haystack_embeddings) # ceil division: how many times to repeat the haystack to cover target_length
393
+ haystack_embeddings = torch.cat([haystack_embeddings] * repeat_times, dim=0)[:target_length]
394
+
395
+ assert len(haystack_embeddings) >= args.max_frame_num * IMAGE_TOKENS, "Not enough haystack embeddings: {} frames ({} tokens) are required, but only {} embedding tokens are available.".format(args.max_frame_num, args.max_frame_num * IMAGE_TOKENS, len(haystack_embeddings))
396
+ # import pdb; pdb.set_trace()
397
+
398
+ haystack_embeddings = haystack_embeddings[:args.max_frame_num * IMAGE_TOKENS].to(accelerator.device)
399
+ prompt = prompt_templates[args.prompt_template]
400
+ preprompt_embeddings = load_text_embeddings(prompt["preprompt"], tokenizer, model, accelerator, args.replace_double_newline)
401
+ postprompt_embeddings = load_text_embeddings(prompt["postprompt"], tokenizer, model, accelerator, args.replace_double_newline)
402
+
403
+ needle_dataset = read_json_file(args.needle_dataset)
404
+ answer_embedding_list = []
405
+ answer_id_list = []
406
+ needle_embedding_list = []
407
+ question_embeding_list = []
408
+ for index, instance in enumerate(needle_dataset):
409
+ answer = instance["answer"]
410
+ question = instance["prompt"]
411
+ needle_embedding_list.append(torch.load(args.needle_embedding_dir + f"/{index}.pt", map_location="cpu").to(torch.bfloat16).to(accelerator.device))
412
+ answer_embedding_list.append(load_text_embeddings(answer, tokenizer, model, accelerator))
413
+ answer_id_list.append(safe_tokenize(tokenizer, answer))
414
+ question_embeding_list.append(load_text_embeddings(question, tokenizer, model, accelerator))
415
+
416
+ accelerator.print("Starting Evaluation...")
417
+ model = accelerator.prepare(model)
418
+ model.gradient_checkpointing_enable()
419
+ all_accuries = []
420
+ for num_frames in tqdm(
421
+ range(
422
+ args.min_frame_num, args.max_frame_num + 1, args.frame_interval
423
+ )
424
+ ):
425
+ for depth in np.arange(0, 1 + args.depth_interval, args.depth_interval):
426
+ accuracies = []
427
+ for question_embedding, needle_embedding, answer_embedding, answer_id in zip(question_embeding_list, needle_embedding_list, answer_embedding_list, answer_id_list):
428
+ query_frame_idx = int(depth * num_frames)
429
+ # import pdb; pdb.set_trace()
430
+ input_frames = torch.cat([haystack_embeddings[:query_frame_idx * IMAGE_TOKENS].to(accelerator.device),needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0)
431
+ input_emebds = torch.cat([preprompt_embeddings.to(accelerator.device), input_frames.to(accelerator.device),question_embedding.to(accelerator.device), postprompt_embeddings.to(accelerator.device)], dim=1)
432
+ video_se = (preprompt_embeddings.shape[1], preprompt_embeddings.shape[1] + input_frames.shape[1])
433
+ correct = eval_forward(
434
+ args, video_se, accelerator, model, input_emebds, answer_embedding, tokenizer.pad_token_id, answer_id, tokenizer
435
+ )
436
+ gc.collect()
437
+ torch.cuda.empty_cache()
438
+ if accelerator.is_main_process:
439
+ accuracies.append(correct)
440
+ if accelerator.is_main_process:
441
+ result = {
442
+ "Num. Frame": num_frames,
443
+ "Frame Depth": round(depth * 100, -1),
444
+ "Score": sum(accuracies) / len(accuracies),
445
+ }
446
+ accelerator.print(result)
447
+ all_accuries.append(result)
448
+ if accelerator.is_main_process:
449
+ model_name = args.model.split("/")[-1]
450
+ os.makedirs(f"{args.output_path}/{model_name}", exist_ok=True)
451
+ # save all_accuries as json
452
+ with open(f"{args.output_path}/{model_name}/all_accuracies.json", "w") as f:
453
+ json.dump(all_accuries, f, indent=4)
454
+ return all_accuries, accelerator
455
+
456
+
457
+ def plot(args, all_accuries):
458
+ df = pd.DataFrame(all_accuries)
459
+ cmap = LinearSegmentedColormap.from_list(
460
+ "custom_cmap", ["#F0496E", "#EBB839", "#9ad5b3"]
461
+ )
462
+
463
+ pivot_table = pd.pivot_table(
464
+ df,
465
+ values="Score",
466
+ index=["Frame Depth", "Num. Frame"],
467
+ aggfunc="mean",
468
+ ).reset_index() # This will aggregate
469
+ pivot_table = pivot_table.pivot(
470
+ index="Frame Depth", columns="Num. Frame", values="Score"
471
+ )
472
+ # Create the heatmap with better aesthetics
473
+ plt.figure(figsize=(17.5, 8)) # Can adjust these dimensions as needed
474
+ ax = sns.heatmap(
475
+ pivot_table,
476
+ # annot=True,
477
+ fmt="g",
478
+ vmin=0,
479
+ vmax=1,
480
+ linecolor='white',
481
+ linewidths=1.5,
482
+ cmap=cmap,
483
+ cbar_kws={"label": "Score"},
484
+ )
485
+
486
+ # Set the color bar label font size
487
+ cbar = ax.collections[0].colorbar
488
+ cbar.ax.yaxis.label.set_size(14)
489
+ cbar.ax.tick_params(labelsize=14)
490
+
491
+
492
+ # Define the formatter function
493
+ def thousands_formatter(x, pos):
494
+ if x >= 1000:
495
+ return f'{x/1000:.1f}K'
496
+ return f'{x}'
497
+
498
+ context_lengths = pivot_table.columns
499
+ formatted_context_lengths = [thousands_formatter(x, None) for x in context_lengths]
500
+
501
+ # More aesthetics
502
+ plt.xlabel("Num. of Frames", fontsize=14) # X-axis label
503
+ plt.ylabel("Depth Percent", fontsize=14) # Y-axis label
504
+ plt.xticks(ticks=[i + 0.5 for i in range(len(context_lengths))], labels=formatted_context_lengths, rotation=45, fontsize=14)
505
+ # plt.xticks(rotation=45, fontsize=14) # Rotates the x-axis labels to prevent overlap
506
+ plt.yticks(rotation=0, fontsize=14) # Ensures the y-axis labels are horizontal
507
+ plt.tight_layout() # Fits everything neatly into the figure area
508
+ # save
509
+ model_name = args.model.split("/")[-1]
510
+
511
+ plt.savefig(f"{args.output_path}/{model_name}/heatmap.png")
512
+ # calculate average accuracy
513
+ average_accuracy = df["Score"].mean()
514
+ print(f"Average Accuracy: {average_accuracy}")
515
+ # save as txt
516
+ with open(f"{args.output_path}/{model_name}/avg_accuracy.txt", "w") as f:
517
+ f.write(f"Average Accuracy: {average_accuracy}\n")
518
+
519
+ def main(args):
520
+ if args.plot_only:
521
+ # load all_accuracies from json
522
+ model_name = args.model.split("/")[-1]
523
+ with open(f"{args.output_path}/{model_name}/all_accuracies.json", "r") as f:
524
+ all_accuracies = json.load(f)
525
+ plot(args, all_accuracies)
526
+ else:
527
+ all_accuracies, accelerator = inference(args)
528
+ if accelerator.is_main_process:
529
+ plot(args, all_accuracies)
530
+
531
+
532
+ if __name__ == "__main__":
533
+ args = argparse.ArgumentParser()
534
+ args.add_argument("--model", type=str, default="/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct")
535
+ args.add_argument("--max_frame_num", type=int, default=1500)
536
+ args.add_argument("--needle_dataset", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/dataset.json")
537
+ args.add_argument("--min_frame_num", type=int, default=400)
538
+ args.add_argument("--frame_interval", type=int, default=100)
539
+ args.add_argument("--output_path", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/niah_output")
540
+ args.add_argument("--depth_interval", type=float, default=0.1)
541
+ args.add_argument("--num_samples", type=int, default=1)
542
+ args.add_argument("--rope_theta", type=float, default=None)
543
+ args.add_argument("--haystack_dir", type=str, default="your haystack_dir")
544
+ args.add_argument("--needle_embedding_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings")
545
+ args.add_argument("--prompt_template", type=str, default='qwen2')
546
+ args.add_argument("--image_tokens", type=int, default=144)
547
+ args.add_argument("--rope_type", type=str, default=None)
548
+ args.add_argument("--replace_double_newline", action="store_true")
549
+ args.add_argument("--plot_only", action="store_true")
550
+ args = args.parse_args()
551
+ IMAGE_TOKENS = args.image_tokens
552
+ main(args)
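Note: a toy comparison of the position-id schemes defined above (vanilla_rope, m_rope, and the scaled videorope variant). This is a sketch only: it assumes the script can be imported as a module (it pulls in transformers and easy_context and applies the monkey patch at import time), sets the module-level IMAGE_TOKENS to 144 to match the hard-coded 9x16 frame grid, and only inspects the temporal row of the returned (3, 1, seq_len) position tensor.

import torch
import vision_niah_d.eval_vision_niah as niah  # assumed module path

niah.IMAGE_TOKENS = 144                        # 9 x 16 patch grid per frame
pre, frames, post = 5, 3, 4                    # text tokens, video frames, text tokens
seq_len = pre + frames * niah.IMAGE_TOKENS + post
input_embeds = torch.zeros(1, seq_len, 8)      # only .shape[1] is read by these helpers
video_se = (pre, pre + frames * niah.IMAGE_TOKENS)

vanilla = niah.get_vanilla_rope_index(input_embeds, video_se)
m_rope = niah.get_m_rope_index(input_embeds, video_se)
videorope = niah.get_t_scale2_rope_index(input_embeds, video_se, scale_factor=2.0)

# Temporal index of the first token of each frame under each scheme:
frame_starts = [pre + f * niah.IMAGE_TOKENS for f in range(frames)]
print([vanilla[0, 0, i].item() for i in frame_starts])    # [5, 149, 293]: grows per token
print([m_rope[0, 0, i].item() for i in frame_starts])     # [5, 6, 7]: grows per frame
print([videorope[0, 0, i].item() for i in frame_starts])  # [5.0, 7.0, 9.0]: per frame, scaled by 2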
vision_niah_d/eval_vision_niah_interrupt.py ADDED
@@ -0,0 +1,572 @@
1
+ import argparse
2
+ import gc
3
+ import sys
4
+ import torch
5
+ from transformers import AutoTokenizer
6
+ from transformers import LlamaForCausalLM
7
+ from easy_context import Qwen2ForCausalLM_RingAttn
8
+ from tqdm import tqdm
9
+ from accelerate import Accelerator
10
+ import glob
11
+ import numpy as np
12
+ from tqdm import tqdm
13
+ import gc
14
+ import matplotlib.pyplot as plt
15
+ import os
16
+ from matplotlib.colors import LinearSegmentedColormap
17
+ import seaborn as sns
18
+ import pandas as pd
19
+ from pathlib import Path
20
+ import random
21
+ import json
22
+ from datasets import load_dataset
23
+ from vision_niah.produce_needle_embedding import read_json_file
24
+ from easy_context import (
25
+ prepare_seq_parallel_inputs,
26
+ apply_seq_parallel_monkey_patch,
27
+ )
28
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
29
+ from torchvision import io, transforms
30
+ from torchvision.transforms import InterpolationMode
31
+ apply_seq_parallel_monkey_patch("zigzag_ring_attn", "llama")
32
+
33
+ import sys
34
+ import pdb
35
+ class ForkedPdb(pdb.Pdb):
36
+ """A Pdb subclass that may be used
37
+ from a forked multiprocessing child
38
+ """
39
+ def interaction(self, *args, **kwargs):
40
+ _stdin = sys.stdin
41
+ try:
42
+ sys.stdin = open('/dev/stdin')
43
+ pdb.Pdb.interaction(self, *args, **kwargs)
44
+ finally:
45
+ sys.stdin = _stdin
46
+
47
+ SEED = 24242424
48
+ torch.manual_seed(SEED)
49
+ random.seed(SEED)
50
+ np.random.seed(SEED)
51
+ IMAGE_TOKENS = None
52
+ prompt_templates = {
53
+ "mistral": {
54
+ "preprompt": "<s>[INST]",
55
+ "postprompt": " [/INST]"
56
+ },
57
+ "vicuna": {
58
+ "preprompt": "<s>A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER:",
59
+ "postprompt": "ASSISTANT:"
60
+ },
61
+ "llama3": {
62
+ "preprompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n",
63
+ "postprompt": "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
64
+ },
65
+ "qwen2": {
66
+ "preprompt": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n",
67
+ "postprompt": "<|im_end|>\n<|im_start|>assistant\n",
68
+ },
69
+ "yi": {
70
+ "preprompt": "<|im_start|>system\nAnswer the questions.<|im_end|>\n<|im_start|>user\n",
71
+ "postprompt": "<|im_end|>\n<|im_start|>assistant\n",
72
+ },
73
+ }
74
+ # \nAnswer the question using a single word or phrase.
75
+ # The color of the bottle cap is
76
+ # answer = "Yellow"
77
+
78
+
79
+ def safe_tokenize(tokenizer, text):
80
+ tokenized = tokenizer.encode(text, return_tensors="pt")
81
+ if tokenizer.bos_token != None and len(tokenized) > 0 and tokenized[0, 0] == tokenizer.bos_token_id:
82
+ tokenized = tokenized[:, 1:]
83
+ return tokenized
84
+
85
+ def get_vanilla_rope_index(input_embeds, video_se):
86
+ return torch.arange(input_embeds.shape[1]).view(1, 1, -1).expand(3, 1, -1)
87
+
88
+ def get_time_rope_index(input_embeds, video_se):
89
+ llm_pos_ids_list = []
90
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
91
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
92
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
93
+ ## time rope
94
+ t_index = torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + nframes).repeat_interleave(IMAGE_TOKENS, dim=0).view(1, 1, -1).expand(3, 1, -1)
95
+ llm_pos_ids_list.append(t_index)
96
+ if input_embeds.shape[1] > video_se[1]:
97
+ text_len = input_embeds.shape[1] - video_se[1]
98
+ llm_pos_ids_list.append(torch.arange(t_index.max().item() + 1, text_len + t_index.max().item() + 1).view(1, 1, -1).expand(3, 1, -1))
99
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
100
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
101
+ return position_ids
102
+
103
+ def get_t_scale2_rope_index(input_embeds, video_se, scale_factor):
104
+ llm_pos_ids_list = []
105
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
106
+ st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0
107
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
108
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
109
+ ## m_rope rope
110
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
111
+
112
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
113
+ -1, llm_grid_h * llm_grid_w).flatten()
114
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
115
+ llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2
116
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
117
+ llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2
118
+ t_index = t_index * scale_factor
119
+ t_index = t_index + st_idx
120
+ h_index = h_index + t_index
121
+ w_index = w_index + t_index
122
+
123
+ llm_pos_ids_list.append(
124
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1))
125
+
126
+ if input_embeds.shape[1] > video_se[1]:
127
+ text_len = input_embeds.shape[1] - video_se[1]
128
+ # print(text_len)
129
+
130
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
131
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
132
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
133
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
134
+ return position_ids
135
+
136
+ def get_m_rope_index(input_embeds, video_se):
137
+ llm_pos_ids_list = []
138
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
139
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
140
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
141
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
142
+ ## m_rope rope
143
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
144
+ t_index = torch.arange(nframes).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
145
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
146
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
147
+ llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx)
148
+ if input_embeds.shape[1] > video_se[1]:
149
+ text_len = input_embeds.shape[1] - video_se[1]
150
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
151
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
152
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
153
+ return position_ids
154
+
155
+ def get_m_modify_margin_index(input_embeds, video_se):
156
+ llm_pos_ids_list = []
157
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
158
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
159
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
160
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
161
+ ## m_rope rope
162
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
163
+
164
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
165
+ -1, llm_grid_h * llm_grid_w).flatten()
166
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
167
+ llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2
168
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
169
+ llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2
170
+ t_index = t_index + st_idx
171
+ h_index = h_index + t_index
172
+ w_index = w_index + t_index
173
+
174
+ llm_pos_ids_list.append(
175
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1))
176
+
177
+ if input_embeds.shape[1] > video_se[1]:
178
+ text_len = input_embeds.shape[1] - video_se[1]
179
+ # print(text_len)
180
+
181
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
182
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
183
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
184
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
185
+ return position_ids
186
+
187
+ def get_m_modify_no_center_index(input_embeds, video_se):
188
+ llm_pos_ids_list = []
189
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
190
+ st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0
191
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
192
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
193
+ ## m_rope rope
194
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
195
+
196
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
197
+ -1, llm_grid_h * llm_grid_w).flatten()
198
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
199
+ llm_grid_t, -1, llm_grid_w).flatten()
200
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
201
+ llm_grid_t, llm_grid_h, -1).flatten()
202
+
203
+ llm_pos_ids_list.append(
204
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx)
205
+
206
+ if input_embeds.shape[1] > video_se[1]:
207
+ text_len = input_embeds.shape[1] - video_se[1]
208
+ # print(text_len)
209
+
210
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
211
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
212
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
213
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
214
+ return position_ids
215
+
216
+ def get_m_modify_index(input_embeds, video_se):
217
+ llm_pos_ids_list = []
218
+ llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1))
219
+ st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0
220
+ assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float'
221
+ nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS
222
+ ## m_rope rope
223
+ llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16
224
+
225
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(
226
+ -1, llm_grid_h * llm_grid_w).flatten()
227
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(
228
+ llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2
229
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(
230
+ llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2
231
+ t_index = t_index + st_idx
232
+ h_index = h_index + t_index
233
+ w_index = w_index + t_index
234
+
235
+ llm_pos_ids_list.append(
236
+ torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1))
237
+
238
+ if input_embeds.shape[1] > video_se[1]:
239
+ text_len = input_embeds.shape[1] - video_se[1]
240
+ # print(text_len)
241
+
242
+ llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1))
243
+ # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape)
244
+ position_ids = torch.cat(llm_pos_ids_list, dim=-1)
245
+ assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}'
246
+ return position_ids
247
+
248
+ def get_position_ids(input_embeds, rope_type, video_se):
249
+ if rope_type == 'vanilla_rope':
250
+ return get_vanilla_rope_index(input_embeds, video_se)
251
+ elif rope_type == 'tad_rope':
252
+ return get_time_rope_index(input_embeds, video_se) + get_vanilla_rope_index(input_embeds, video_se)
253
+ elif rope_type == 'm_rope':
254
+ return get_m_rope_index(input_embeds, video_se)
255
+ elif rope_type == 'videorope':
256
+ scale_factor = 2.0
257
+ return get_t_scale2_rope_index(input_embeds, video_se, scale_factor)
258
+ else:
259
+ raise ValueError(f"not this rope: {rope_type}")
260
+
261
+ # answer = "more bet"
262
+ def eval_forward(args, video_se, accelerator, model, input_embeds, answer_embeds, pad_id, answer_ids, tokenizer):
263
+ # first append answer_embeds to input_embeds
264
+ prompt_length = input_embeds.shape[1]
265
+ labels_length = answer_embeds.shape[1]
266
+ input_embeds = torch.cat([input_embeds, answer_embeds], dim=1)
267
+ # second pad input_embeds to the multiple of accelerator.num_processes
268
+ pad_tensor = torch.tensor(
269
+ [pad_id]
270
+ * (
271
+ (accelerator.num_processes * 2)
272
+ - input_embeds.shape[1] % (accelerator.num_processes * 2)
273
+ )
274
+ ).unsqueeze(0).unsqueeze(-1).expand(-1, -1, input_embeds.shape[-1]).to(accelerator.device)
275
+ input_embeds = torch.cat([input_embeds, pad_tensor], dim=1)
276
+ # position_ids = (
277
+ # torch.arange(input_embeds.shape[1]).unsqueeze(0).expand(input_embeds.shape[0], -1)
278
+ # ).to(accelerator.device)
279
+ position_ids = get_position_ids(input_embeds, args.rope_type, video_se)
280
+ # ForkedPdb().set_trace()
281
+ accelerator.print(input_embeds.shape)
282
+ prepared = prepare_seq_parallel_inputs(
283
+ "zigzag_ring_attn",
284
+ input_embeds,
285
+ position_ids,
286
+ None,
287
+ accelerator.process_index,
288
+ accelerator.num_processes,
289
+ accelerator.device,
290
+ )
291
+ local_input_embeds = prepared["local_input_ids"]
292
+ local_position_ids = prepared["local_position_ids"]
293
+ if 'm_modify' in args.rope_type or 't_only' in args.rope_type or 'change_freq' in args.rope_type:
294
+ from transformers.models.qwen2_vl import modeling_qwen2_vl
295
+ modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = modeling_qwen2_vl.apply_m_modify_multimodal_rotary_pos_emb
296
+ with torch.inference_mode():
297
+ hidden_states = model.model(
298
+ inputs_embeds=local_input_embeds,
299
+ position_ids=local_position_ids,
300
+ use_cache=False,
301
+ )[0]
302
+ logits = model.lm_head(hidden_states)
303
+ logits = logits.float()
304
+
305
+ pred = logits.argmax(dim=-1)
306
+
307
+ # gather all logits using accelerator.gather
308
+ def undo_extract_local(gathered_value, world_size, dim=1):
309
+ value_chunks = gathered_value.chunk(2 * world_size, dim=dim)
310
+ reordered_chunks = [None] * (2 * world_size)
311
+ for i in range(world_size):
312
+ reordered_chunks[i] = value_chunks[i * 2]
313
+ reordered_chunks[2 * world_size - i - 1] = value_chunks[i * 2 + 1]
314
+ return torch.cat(reordered_chunks, dim=dim)
315
+
316
+ correct = False
317
+
318
+ gathered_logits = accelerator.gather(pred.squeeze(0)).unsqueeze(0)
319
+ # undo extract local on the gathered logits
320
+ # ForkedPdb().set_trace()
321
+ pred = undo_extract_local(gathered_logits, accelerator.num_processes)
322
+ pred = pred[:, prompt_length - 1 : prompt_length + labels_length - 1]
323
+ # check if the logits are correct, extract argmax id
324
+ # compare the predicted_ids with the labels
325
+ correct = (pred == answer_ids.to(accelerator.device)).all()
326
+ if accelerator.is_main_process:
327
+ print(
328
+ "Predicted: ",
329
+ tokenizer.decode(pred.squeeze().tolist()),
330
+ "Answer: ",
331
+ tokenizer.decode(answer_ids.squeeze().tolist()),
332
+ )
333
+ # print id as well
334
+ print(
335
+ "Predicted: ",
336
+ pred.squeeze().tolist(),
337
+ "Answer: ",
338
+ answer_ids.squeeze().tolist(),
339
+ )
340
+ return int(correct)
341
+
342
+
343
+ def load_haystack(args, accelerator):
344
+ haystack_embeddings = torch.load(f"{args.haystack_dir}/video_embeddings.pt").to(torch.bfloat16)
345
+
346
+ return haystack_embeddings
347
+
348
+ def load_text_embeddings(str, tokenizer, model, accelerator, replace_double_newline=False):
349
+ token_ids = safe_tokenize(tokenizer, str)
350
+ def replace_double_newline_func(token_ids):
351
+ # substitute token id 271 with two 198 tokens
352
+ # for example:
353
+ # from: tensor([[128000, 128006, 9125, 128007, 271, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]])
354
+ # to: tensor([[128000, 128006, 9125, 128007, 198, 198, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]])
355
+ # length will increase by number of 271
356
+ double_newline_loc = (token_ids == 271).nonzero()[:, 1]
357
+ double_newline_loc += torch.arange(len(double_newline_loc))
358
+ if len(double_newline_loc) > 0:
359
+ for loc in double_newline_loc:
360
+ token_ids = torch.cat([token_ids[:, :loc], torch.tensor([[198, 198]]), token_ids[:, loc+1:]], dim=1)
361
+ return token_ids
362
+ if replace_double_newline:
363
+ token_ids = replace_double_newline_func(token_ids)
364
+ token_ids = token_ids.to(accelerator.device)
365
+ with torch.inference_mode():
366
+ embeddings = model.model.embed_tokens(token_ids)
367
+ return embeddings.to(torch.bfloat16)
368
+
369
+ def inference(args):
370
+ accelerator = Accelerator(
371
+ mixed_precision="bf16",
372
+ )
373
+ model_path = args.model
374
+ model = Qwen2VLForConditionalGeneration.from_pretrained(model_path,
375
+ device_map=accelerator.device,
376
+ torch_dtype=torch.bfloat16,
377
+ attn_implementation="flash_attention_2"
378
+ )
379
+ del model.visual
380
+ processor = AutoProcessor.from_pretrained("/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct")
381
+ tokenizer = processor.tokenizer
382
+
383
+
384
+ kwargs = {"rope_theta": args.rope_theta} if args.rope_theta is not None else {}
385
+ tokenizer.pad_token = tokenizer.eos_token
386
+ # remember to remove <s>
387
+ accelerator.print("Preparing Haystack...")
388
+ haystack_embeddings = load_haystack(args, accelerator)
389
+ target_length = args.max_frame_num * IMAGE_TOKENS
390
+ # ForkedPdb().set_trace()
391
+ if len(haystack_embeddings) < target_length:
392
+ repeat_times = (target_length + len(haystack_embeddings) - 1) // len(haystack_embeddings) # ceil division: how many times to repeat the haystack to cover target_length
393
+ haystack_embeddings = torch.cat([haystack_embeddings] * repeat_times, dim=0)[:target_length]
394
+
395
+ assert len(haystack_embeddings) >= args.max_frame_num * IMAGE_TOKENS, "Not enough haystack embeddings: {} frames ({} tokens) are required, but only {} embedding tokens are available.".format(args.max_frame_num, args.max_frame_num * IMAGE_TOKENS, len(haystack_embeddings))
396
+ # import pdb; pdb.set_trace()
397
+
398
+ haystack_embeddings = haystack_embeddings[:args.max_frame_num * IMAGE_TOKENS].to(accelerator.device)
399
+ prompt = prompt_templates[args.prompt_template]
400
+ preprompt_embeddings = load_text_embeddings(prompt["preprompt"], tokenizer, model, accelerator, args.replace_double_newline)
401
+ postprompt_embeddings = load_text_embeddings(prompt["postprompt"], tokenizer, model, accelerator, args.replace_double_newline)
402
+
403
+ needle_dataset = read_json_file(args.needle_dataset)
404
+ answer_embedding_list = []
405
+ answer_id_list = []
406
+ needle_embedding_list = []
407
+ needle_embedding_interrupt_list = []
408
+ question_embeding_list = []
409
+ for index, instance in enumerate(needle_dataset):
410
+ answer = instance["answer"]
411
+ question = instance["prompt"]
412
+ needle_embedding_list.append(torch.load(args.needle_embedding_dir + f"/{index}.pt", map_location="cpu").to(torch.bfloat16).to(accelerator.device))
413
+ needle_embedding_interrupt_list.append(torch.load(args.needle_embedding_interrupt_dir + f"/{index}.pt", map_location="cpu").to(torch.bfloat16).to(accelerator.device))
414
+ answer_embedding_list.append(load_text_embeddings(answer, tokenizer, model, accelerator))
415
+ answer_id_list.append(safe_tokenize(tokenizer, answer))
416
+ question_embeding_list.append(load_text_embeddings(question, tokenizer, model, accelerator))
417
+
418
+ accelerator.print("Starting Evaluation...")
419
+ model = accelerator.prepare(model)
420
+ model.gradient_checkpointing_enable()
421
+ all_accuries = []
422
+ for num_frames in tqdm(
423
+ range(
424
+ args.min_frame_num, args.max_frame_num + 1, args.frame_interval
425
+ )
426
+ ):
427
+ for depth in np.arange(0, 1 + args.depth_interval, args.depth_interval):
428
+ accuracies = []
429
+ for question_embedding, needle_embedding, needle_embedding_interrupt, answer_embedding, answer_id in zip(question_embeding_list, needle_embedding_list, needle_embedding_interrupt_list, answer_embedding_list, answer_id_list):
430
+ query_frame_idx = int(depth * num_frames)
431
+ # import pdb; pdb.set_trace()
432
+ #! interrupt every 200 frames
433
+ import random
434
+ test_p = random.random()
435
+ mode = 'no'
436
+ cycle = 200
437
+ if query_frame_idx - cycle <= 0 and query_frame_idx + cycle >= num_frames: mode = 'no'
438
+ elif query_frame_idx < cycle: mode = 'after'
439
+ elif query_frame_idx + cycle >= num_frames: mode = 'before'
440
+ elif test_p < 0.5: mode = 'before'
441
+ else: mode = 'after'
442
+ print(f"{mode=}")
443
+ if mode == 'before':
444
+ input_frames = torch.cat([haystack_embeddings[:(query_frame_idx-cycle) * IMAGE_TOKENS].to(accelerator.device), needle_embedding_interrupt.to(accelerator.device), haystack_embeddings[(query_frame_idx-cycle) * IMAGE_TOKENS:query_frame_idx * IMAGE_TOKENS].to(accelerator.device), needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0)
445
+ elif mode == 'after':
446
+ input_frames = torch.cat([haystack_embeddings[:query_frame_idx * IMAGE_TOKENS].to(accelerator.device),needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:(query_frame_idx+cycle)*IMAGE_TOKENS].to(accelerator.device), needle_embedding_interrupt.to(accelerator.device), haystack_embeddings[(query_frame_idx+cycle)*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0)
447
+ else:
448
+ input_frames = torch.cat([haystack_embeddings[:query_frame_idx * IMAGE_TOKENS].to(accelerator.device),needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0)
449
+ input_emebds = torch.cat([preprompt_embeddings.to(accelerator.device), input_frames.to(accelerator.device),question_embedding.to(accelerator.device), postprompt_embeddings.to(accelerator.device)], dim=1)
450
+ video_se = (preprompt_embeddings.shape[1], preprompt_embeddings.shape[1] + input_frames.shape[1])
451
+ # ForkedPdb().set_trace()
452
+ correct = eval_forward(
453
+ args, video_se, accelerator, model, input_emebds, answer_embedding, tokenizer.pad_token_id, answer_id, tokenizer
454
+ )
455
+ gc.collect()
456
+ torch.cuda.empty_cache()
457
+ if accelerator.is_main_process:
458
+ accuracies.append(correct)
459
+ if accelerator.is_main_process:
460
+ result = {
461
+ "Num. Frame": num_frames,
462
+ "Frame Depth": round(depth * 100, -1),
463
+ "Score": sum(accuracies) / len(accuracies),
464
+ }
465
+ accelerator.print(result)
466
+ all_accuries.append(result)
467
+ if accelerator.is_main_process:
468
+ model_name = args.model.split("/")[-1]
469
+ os.makedirs(f"{args.output_path}/{model_name}", exist_ok=True)
470
+ # save all_accuries as json
471
+ with open(f"{args.output_path}/{model_name}/all_accuracies.json", "w") as f:
472
+ json.dump(all_accuries, f, indent=4)
473
+ return all_accuries, accelerator
474
+
475
+
476
+ def plot(args, all_accuries):
+ df = pd.DataFrame(all_accuries)
+ cmap = LinearSegmentedColormap.from_list(
+ "custom_cmap", ["#F0496E", "#EBB839", "#9ad5b3"]
+ )
+
+ pivot_table = pd.pivot_table(
+ df,
+ values="Score",
+ index=["Frame Depth", "Num. Frame"],
+ aggfunc="mean",
+ ).reset_index() # This will aggregate
+ pivot_table = pivot_table.pivot(
+ index="Frame Depth", columns="Num. Frame", values="Score"
+ )
+ # Create the heatmap with better aesthetics
+ plt.figure(figsize=(17.5, 8)) # Can adjust these dimensions as needed
+ ax = sns.heatmap(
+ pivot_table,
+ # annot=True,
+ fmt="g",
+ vmin=0,
+ vmax=1,
+ linecolor='white',
+ linewidths=1.5,
+ cmap=cmap,
+ cbar_kws={"label": "Score"},
+ )
+
+ # Set the color bar label font size
+ cbar = ax.collections[0].colorbar
+ cbar.ax.yaxis.label.set_size(14)
+ cbar.ax.tick_params(labelsize=14)
+
+
+ # Define the formatter function
+ def thousands_formatter(x, pos):
+ if x >= 1000:
+ return f'{x/1000:.1f}K'
+ return f'{x}'
+
+ context_lengths = pivot_table.columns
+ formatted_context_lengths = [thousands_formatter(x, None) for x in context_lengths]
+
+ # More aesthetics
+ plt.xlabel("Num. of Frames", fontsize=14) # X-axis label
+ plt.ylabel("Depth Percent", fontsize=14) # Y-axis label
+ plt.xticks(ticks=[i + 0.5 for i in range(len(context_lengths))], labels=formatted_context_lengths, rotation=45, fontsize=14)
+ # plt.xticks(rotation=45, fontsize=14) # Rotates the x-axis labels to prevent overlap
+ plt.yticks(rotation=0, fontsize=14) # Ensures the y-axis labels are horizontal
+ plt.tight_layout() # Fits everything neatly into the figure area
+ # save
+ model_name = args.model.split("/")[-1]
+
+ plt.savefig(f"{args.output_path}/{model_name}/heatmap.png")
+ # calculate average accuracy
+ average_accuracy = df["Score"].mean()
+ print(f"Average Accuracy: {average_accuracy}")
+ # save as txt
+ with open(f"{args.output_path}/{model_name}/avg_accuracy.txt", "w") as f:
+ f.write(f"Average Accuracy: {average_accuracy}\n")
+
+ def main(args):
+ if args.plot_only:
+ # load all_accuracies from json
+ model_name = args.model.split("/")[-1]
+ with open(f"{args.output_path}/{model_name}/all_accuracies.json", "r") as f:
+ all_accuracies = json.load(f)
+ plot(args, all_accuracies)
+ else:
+ all_accuracies, accelerator = inference(args)
+ if accelerator.is_main_process:
+ plot(args, all_accuracies)
+
+
+ if __name__ == "__main__":
+ args = argparse.ArgumentParser()
+ args.add_argument("--model", type=str, default="/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct")
+ args.add_argument("--max_frame_num", type=int, default=1500)
+ args.add_argument("--needle_dataset", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/dataset_change_format.json")
+ args.add_argument("--min_frame_num", type=int, default=400)
+ args.add_argument("--frame_interval", type=int, default=100)
+ args.add_argument("--output_path", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/niah_output")
+ args.add_argument("--depth_interval", type=float, default=0.1)
+ args.add_argument("--num_samples", type=int, default=1)
+ args.add_argument("--rope_theta", type=float, default=None)
+ args.add_argument("--haystack_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/haystack_vicuna_embeddings_concat3000frames_144tokens_has_background")
+ args.add_argument("--needle_embedding_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings")
+ args.add_argument("--needle_embedding_interrupt_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings")
+ args.add_argument("--prompt_template", type=str, default='qwen2')
+ args.add_argument("--image_tokens", type=int, default=144)
+ args.add_argument("--rope_type", type=str, default=None)
+ args.add_argument("--replace_double_newline", action="store_true")
+ args.add_argument("--plot_only", action="store_true")
+ args = args.parse_args()
+ IMAGE_TOKENS = args.image_tokens
+ main(args)
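
The interrupt-needle placement added above reduces to a simple rule on frame indices: keep the distractor a full `cycle` of frames away from the target needle, falling back to 'no' when the haystack is too short on both sides. A minimal, self-contained sketch of that rule follows; the function name, signature, and return convention are illustrative only (not repo code), while the `cycle = 200` default and the before/after/no cases mirror the diff above.

    import random

    def pick_interrupt_frame(query_frame_idx, num_frames, cycle=200, rng=random):
        # Return the frame index where the distractor needle would be inserted,
        # or None when it cannot sit a full `cycle` away from the target needle.
        if query_frame_idx - cycle <= 0 and query_frame_idx + cycle >= num_frames:
            return None                                    # mode == 'no'
        if query_frame_idx < cycle:
            return query_frame_idx + cycle                 # mode == 'after'
        if query_frame_idx + cycle >= num_frames:
            return query_frame_idx - cycle                 # mode == 'before'
        # Both sides fit: choose at random, as the script does with test_p.
        return query_frame_idx - cycle if rng.random() < 0.5 else query_frame_idx + cycle
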
vision_niah_d/needle_datasets/dataset.json ADDED
@@ -0,0 +1,27 @@
+ [
+ {
+ "path": "zoo.png",
+ "prompt": "\nFind the frame with the word 'zoo'. What is the animal outside the zoo shop?\nA. lion\nB. tiger\nC. horse\nD. dog\nAnswer with the option's letter from the given choices directly.",
+ "answer": "B"
+ },
+ {
+ "path": "sora_balloon.png",
+ "prompt": "\nFind the frame of a couple in a wedding. In side the frame, there is a balloon on the bridegroom's head. What is the color of that ballon?\nA. Yellow\nB. Red\nC. Blue\nD. White\nPlease provide your answer by stating the letter followed by the full option.",
+ "answer": "A"
+ },
+ {
+ "path": "selenium_green.jpg",
+ "prompt": "\nFind the frame with the image of Selenium tablets. How many mg does each tablet contain?\nAnswer the question using a single word or phrase.",
+ "answer": "200"
+ },
+ {
+ "path": "panda_scientist.png",
+ "prompt": "\nFind the frame of a scientist. The scientist is a...\nA. Bird\nB. Elephant\nC. Panda\nD. Dog\nPlease provide your answer by stating the letter followed by the full option.",
+ "answer": "C"
+ },
+ {
+ "path": "teddy_bear_times_square.png",
+ "prompt": "\nFind the frame of a teddy bear. Where is this teddy bear?\nA. Times Square\nB. Eiffel Tower\nC. Taj Mahal\nD. Sydney Opera House\nPlease provide your answer by stating the letter followed by the full option.",
+ "answer": "A"
+ }
+ ]
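
Each entry in this needle dataset pairs an image filename with a prompt and a short expected answer. A minimal sketch of how such an entry can be consumed (the only repo facts used are the JSON keys above and the needle_datasets/images/ directory listed below; everything else, including the loop itself, is illustrative):

    import json
    from pathlib import Path

    IMAGE_DIR = Path("vision_niah_d/needle_datasets/images")   # needle images listed below

    with open("vision_niah_d/needle_datasets/dataset.json") as f:
        needles = json.load(f)

    for needle in needles:
        image = IMAGE_DIR / needle["path"]   # the needle frame, e.g. zoo.png
        prompt = needle["prompt"]            # question asked after the video context
        answer = needle["answer"]            # expected short answer, e.g. "B" or "200"
        print(image, answer)
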
vision_niah_d/needle_datasets/dataset_interrupt.json ADDED
@@ -0,0 +1,27 @@
+ [
+ {
+ "path": "zoo_interrupt.png",
+ "prompt": "\nFind the frame with the word 'zoo'. What is the animal outside the zoo shop?\nA. lion\nB. tiger\nC. horse\nD. dog\nAnswer with the option's letter from the given choices directly.",
+ "answer": "B"
+ },
+ {
+ "path": "sora_balloon_interrupt.png",
+ "prompt": "\nFind the frame of a couple in a wedding. In side the frame, there is a balloon on the bridegroom's head. What is the color of that ballon?\nA. Yellow\nB. Red\nC. Blue\nD. White\nPlease provide your answer by stating the letter followed by the full option.",
+ "answer": "A"
+ },
+ {
+ "path": "selenium_green_interrupt.png",
+ "prompt": "\nFind the frame with the image of Selenium tablets. How many mg does each tablet contain?\nAnswer the question using a single word or phrase.",
+ "answer": "200"
+ },
+ {
+ "path": "panda_scientist_interrupt.png",
+ "prompt": "\nFind the frame of a scientist. The scientist is a...\nA. Bird\nB. Elephant\nC. Panda\nD. Dog\nPlease provide your answer by stating the letter followed by the full option.",
+ "answer": "C"
+ },
+ {
+ "path": "teddy_bear_times_square_interrupt.png",
+ "prompt": "\nFind the frame of a teddy bear. Where is this teddy bear?\nA. Times Square\nB. Eiffel Tower\nC. Taj Mahal\nD. Sydney Opera House\nPlease provide your answer by stating the letter followed by the full option.",
+ "answer": "A"
+ }
+ ]
vision_niah_d/needle_datasets/git_placeholder ADDED
File without changes
vision_niah_d/needle_datasets/images/astronaut.png ADDED

Git LFS Details

  • SHA256: aa2edd0f58161729b0fc708ba2132a616d99d8f3c336cdf6de7ef0cf2ae4701a
  • Pointer size: 132 Bytes
  • Size of remote file: 2.11 MB
vision_niah_d/needle_datasets/images/construction_site.png ADDED

Git LFS Details

  • SHA256: 356159a446e46ce0909fdd791b96d12e38bbf6d2ad84e2e4309b779004cc9655
  • Pointer size: 132 Bytes
  • Size of remote file: 2.28 MB
vision_niah_d/needle_datasets/images/dolphin.png ADDED

Git LFS Details

  • SHA256: b8865563b7736314c90fe285e7df945c6338e08b03cd99c329985489f10d751f
  • Pointer size: 132 Bytes
  • Size of remote file: 4.55 MB
vision_niah_d/needle_datasets/images/llava-next.png ADDED

Git LFS Details

  • SHA256: ec1ec34bc2982c72a4a39d39fedaa77b210cedfb5b9bb2a8b22c6dcf0f569e26
  • Pointer size: 132 Bytes
  • Size of remote file: 2 MB
vision_niah_d/needle_datasets/images/panda_scientist.png ADDED

Git LFS Details

  • SHA256: fb19d41d4d6fe9e011ec19f3df132d6c241dc0fccf3cf29d3bdc86d50a37ecf4
  • Pointer size: 132 Bytes
  • Size of remote file: 4.91 MB
vision_niah_d/needle_datasets/images/panda_scientist_interrupt.png ADDED

Git LFS Details

  • SHA256: be6fcf8c95a9d9be6bcd690728b2ae02d9d6184c73f31d9a94987bcd75c9dbf6
  • Pointer size: 130 Bytes
  • Size of remote file: 71.6 kB
vision_niah_d/needle_datasets/images/selenium_green.jpg ADDED

Git LFS Details

  • SHA256: 64ad70bd814cd4398bd1768deab93c200a44743ca8ff53b7e633b68cfee1a94f
  • Pointer size: 131 Bytes
  • Size of remote file: 112 kB
vision_niah_d/needle_datasets/images/selenium_green_interrupt.png ADDED

Git LFS Details

  • SHA256: 41d563dac3b7e2e069cfeb3f25a8447aec8a63d164d3a8a16fb0343053bdaa71
  • Pointer size: 130 Bytes
  • Size of remote file: 44.6 kB
vision_niah_d/needle_datasets/images/sora_balloon.png ADDED

Git LFS Details

  • SHA256: 9c9bfd8d732b27dad23b9f93be38e18d3f5e4cf281933c5b1a401602d42b8c3d
  • Pointer size: 132 Bytes
  • Size of remote file: 1.51 MB
vision_niah_d/needle_datasets/images/sora_balloon_interrupt.png ADDED

Git LFS Details

  • SHA256: d1443d2fb3526d21ae891a8dc090e770b9166c7375822bf026069bd9888dbf37
  • Pointer size: 130 Bytes
  • Size of remote file: 39.6 kB