diff --git a/vision_niah_d/easy_context/__init__.py b/vision_niah_d/easy_context/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..523a8b378586303dbd124acddf884d8f00fc21f7 --- /dev/null +++ b/vision_niah_d/easy_context/__init__.py @@ -0,0 +1,56 @@ +from .dist_flash_attn.prepare_input import prepare_dist_flash_attn_inputs +from .dist_flash_attn.monkey_patch import apply_dist_flash_attn_monkey_patch_llama +from .zigzag_ring_attn.prepare_inputs import prepare_zigzag_ring_attn_inputs +from .zigzag_ring_attn.monkey_patch import apply_zigzag_ring_attn_monkey_patch_llama +from .zigzag_ring_attn.monkey_patch import apply_zigzag_ring_attn_monkey_patch_mistral +from .unsloth_offloaded_gradient_checkpoint.monkey_patch import apply_unsloth_offloaded_gradient_checkpoint_monkey_patch +from .ulysses_attn.prepare_inputs import prepare_ulysses_attn_inputs +from .ulysses_attn.monkey_patch import apply_ulysses_attn_monkey_patch_llama +from .modeling_qwen2 import Qwen2ForCausalLM_RingAttn +def prepare_seq_parallel_inputs( + seq_algo, input_ids, position_ids, target_ids, rank, world_size, device +): + if seq_algo == "zigzag_ring_attn": + return prepare_zigzag_ring_attn_inputs( + input_ids, position_ids, target_ids, rank, world_size, device + ) + elif seq_algo == "dist_flash_attn": + return prepare_dist_flash_attn_inputs( + input_ids, position_ids, target_ids, rank, world_size, device + ) + elif seq_algo == "ulysses_attn": + return prepare_ulysses_attn_inputs( + input_ids, position_ids, target_ids, rank, world_size, device + ) + elif seq_algo == "data_parallel": + return { + "local_input_ids": input_ids.to(device), + "local_position_ids": position_ids.to(device), + "local_target_ids": target_ids.to(device), + } + else: + raise ValueError(f"Invalid seq_algo: {seq_algo}") + +def apply_seq_parallel_monkey_patch( + seq_algo, model +): + assert seq_algo in ["zigzag_ring_attn", "dist_flash_attn", "ulysses_attn", "data_parallel"], f"Invalid seq_algo: {seq_algo}" + assert model in ["llama", "mistral"], f"Invalid model: {model}" + if seq_algo == "data_parallel": + return + elif seq_algo == "zigzag_ring_attn" and model == "llama": + apply_zigzag_ring_attn_monkey_patch_llama() + elif seq_algo == "zigzag_ring_attn" and model == "mistral": + apply_zigzag_ring_attn_monkey_patch_mistral() + elif seq_algo == "dist_flash_attn" and model == "llama": + apply_dist_flash_attn_monkey_patch_llama() + elif seq_algo == "ulysses_attn" and model == "llama": + apply_ulysses_attn_monkey_patch_llama() + else: + raise ValueError(f"Invalid seq_algo: {seq_algo} or model: {model}") + +def prepare_dataloader(seq_algo, dataloader, acclerator): + if seq_algo == "data_parallel": + return acclerator.prepare(dataloader) + else: + return dataloader \ No newline at end of file diff --git a/vision_niah_d/easy_context/__pycache__/__init__.cpython-310.pyc b/vision_niah_d/easy_context/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea5d4ff3e2bf9b008cc1050660056120ee9692ca Binary files /dev/null and b/vision_niah_d/easy_context/__pycache__/__init__.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/__pycache__/low_mem_cross_ent.cpython-310.pyc b/vision_niah_d/easy_context/__pycache__/low_mem_cross_ent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e818fe05f3d8a24e9e66c4ed564f2b78984ab2e Binary files /dev/null and b/vision_niah_d/easy_context/__pycache__/low_mem_cross_ent.cpython-310.pyc differ diff --git 
a/vision_niah_d/easy_context/__pycache__/modeling_qwen2.cpython-310.pyc b/vision_niah_d/easy_context/__pycache__/modeling_qwen2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e024ce34d8d8b7058f968cef5db8927c75b2a0e Binary files /dev/null and b/vision_niah_d/easy_context/__pycache__/modeling_qwen2.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml b/vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml new file mode 100644 index 0000000000000000000000000000000000000000..40713c68c4ab92b0198f1890864e71579892fb10 --- /dev/null +++ b/vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml @@ -0,0 +1,17 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + deepspeed_config_file: easy_context/accelerate_configs/zero3_offload_inference.json + zero3_init_flag: false +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/vision_niah_d/easy_context/accelerate_configs/single_node.yaml b/vision_niah_d/easy_context/accelerate_configs/single_node.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8b12a2afd5e0a56dcea75cd75af7941416f829b2 --- /dev/null +++ b/vision_niah_d/easy_context/accelerate_configs/single_node.yaml @@ -0,0 +1,17 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + deepspeed_config_file: easy_context/accelerate_configs/zero3_offload.json + zero3_init_flag: false +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/vision_niah_d/easy_context/accelerate_configs/two_node.yaml b/vision_niah_d/easy_context/accelerate_configs/two_node.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3b2284c416a6d4ebb5031063671bbc1688ad0ff --- /dev/null +++ b/vision_niah_d/easy_context/accelerate_configs/two_node.yaml @@ -0,0 +1,16 @@ +debug: false +deepspeed_config: + deepspeed_config_file: easy_context/accelerate_configs/zero3_offload.json + deepspeed_multinode_launcher: standard + zero3_init_flag: false +distributed_type: DEEPSPEED +downcast_bf16: 'no' +num_machines: 2 +num_processes: 16 +main_training_function: main +rdzv_backend: c10d +same_network: false +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/vision_niah_d/easy_context/accelerate_configs/zero3_offload.json b/vision_niah_d/easy_context/accelerate_configs/zero3_offload.json new file mode 100644 index 0000000000000000000000000000000000000000..21a739cc0ef45f1e87b2ed723c989ffac06c54d4 --- /dev/null +++ b/vision_niah_d/easy_context/accelerate_configs/zero3_offload.json @@ -0,0 +1,52 @@ +{ + "bf16": { + "enabled": "auto" + }, + "fp16": { + "enabled": "auto" + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 1e-5, + "warmup_max_lr": 1e-5, + "warmup_num_steps": 0, + "warmup_type": "linear" + } + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": [0.9, 0.95], + "eps": 1e-8, + "weight_decay": 0.1 + } + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": 
"cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": 1, + "wall_clock_breakdown": false +} diff --git a/vision_niah_d/easy_context/accelerate_configs/zero3_offload_inference.json b/vision_niah_d/easy_context/accelerate_configs/zero3_offload_inference.json new file mode 100644 index 0000000000000000000000000000000000000000..79ecddaec7a3341abadfa81267b6a76f1b41b5db --- /dev/null +++ b/vision_niah_d/easy_context/accelerate_configs/zero3_offload_inference.json @@ -0,0 +1,21 @@ +{ + "bf16": { + "enabled": "auto" + }, + "fp16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 3, + "stage3_prefetch_bucket_size": 33554432, + "stage3_param_persistence_threshold": 4096, + "stage3_max_live_parameters":33554432, + "offload_param": { + "device": "cpu", + "pin_memory": true + } + }, + "train_batch_size": 8, + "train_micro_batch_size_per_gpu": 1, + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/vision_niah_d/easy_context/dist_flash_attn/README.md b/vision_niah_d/easy_context/dist_flash_attn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2025265c3ecc2111a24081dc00b1d5d0b8d7b44b --- /dev/null +++ b/vision_niah_d/easy_context/dist_flash_attn/README.md @@ -0,0 +1,11 @@ +# LightSeq +Taken from https://github.com/RulinShao/LightSeq. All credits to the authors. 
+ +``` +@article{li2023lightseq, + title={LIGHTSEQ: SEQUENCE LEVEL PARALLELISM FOR DISTRIBUTED TRAINING OF LONG CONTEXT TRANSFORMERS}, + author={Li, Dacheng and Shao, Rulin and Xie, Anze and Xing, Eric P and Gonzalez, Joseph E and Stoica, Ion and Ma, Xuezhe and Zhang, Hao}, + journal={arXiv preprint arXiv:2310.03294}, + year={2023} +} +``` \ No newline at end of file diff --git a/vision_niah_d/easy_context/dist_flash_attn/__pycache__/async_communication.cpython-310.pyc b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/async_communication.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b327c0e446644255e73322f436b21231ec0c6b9 Binary files /dev/null and b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/async_communication.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/dist_flash_attn/__pycache__/lightseq_async_attn.cpython-310.pyc b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/lightseq_async_attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69880b8b1e902a85102dddd640c3c0d7c33658e4 Binary files /dev/null and b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/lightseq_async_attn.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/dist_flash_attn/__pycache__/monkey_patch.cpython-310.pyc b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/monkey_patch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14bf8d7abf9ed28cea07591d969123d9c8a4cc47 Binary files /dev/null and b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/monkey_patch.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/dist_flash_attn/__pycache__/prepare_input.cpython-310.pyc b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/prepare_input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b15f36290456872ef409ed54d997c8b2fde7fd5 Binary files /dev/null and b/vision_niah_d/easy_context/dist_flash_attn/__pycache__/prepare_input.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/dist_flash_attn/async_communication.py b/vision_niah_d/easy_context/dist_flash_attn/async_communication.py new file mode 100644 index 0000000000000000000000000000000000000000..610080ea3b6fa66bccff12657ff17794c2f70141 --- /dev/null +++ b/vision_niah_d/easy_context/dist_flash_attn/async_communication.py @@ -0,0 +1,527 @@ +import threading +import math +import os + +import torch +import torch.distributed as dist +from torch.distributed import batch_isend_irecv, P2POp, isend, irecv + +# Sequence parallel group that the current rank belongs to. +_SEQUENCE_PARALLEL_GROUP = None + +# These values enable us to change the sequence parallel sizes on the fly.
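+# They are filled in by _initialize_sequence_parallel() below and read back through the
+# get_sequence_parallel_rank() / get_sequence_parallel_size() helpers.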
+_SEQUENCE_PARALLEL_SIZE = None +_SEQUENCE_PARALLEL_RANK = None + +# Global buffer for P2P +_PEER_Q = None +_PEER_K = None +_PEER_V = None +_PEER_M = None +_PEER_L = None +_PEER_O = None +_PEER_Q_BWD = None +_PEER_K_BWD = None +_PEER_V_BWD = None +_PEER_O_BWD = None + +_DELTA_DQ = None +_PEER_L = None +_DELTA_DK = None +_DELTA_DV = None +_DK_DELTA_FROM_PEER = None +_DV_DELTA_FROM_PEER = None +_PEER_DO = None + + +_fwd_send_volume = 0 +_fwd_recv_volume = 0 +_bwd_send_volume = 0 +_bwd_recv_volume = 0 + +def initialize_distributed(): + if dist.is_initialized(): + if dist.get_rank() == 0: + print( + "torch distributed is already initialized, " + "skipping initialization ...", + flush=True, + ) + else: + if int(os.environ["RANK"]) == 0: + print("Initializing Torch distributed.") + dist.init_process_group(backend="nccl") + local_world_size = int(os.environ["LOCAL_WORLD_SIZE"]) + global_world_size = dist.get_world_size() + torch.cuda.set_device(dist.get_rank() % local_world_size) + + _initialize_sequence_parallel() + # create_nccl_communicators() + +def _initialize_sequence_parallel(sequence_parallel_size=None): + # Get world size and rank. Ensure some consistencies. + assert sequence_parallel_size is None, "Multiple sequence parallel group not implemented." + assert torch.distributed.is_initialized() + world_size: int = torch.distributed.get_world_size() + + if sequence_parallel_size is None: + sequence_parallel_size = world_size + else: + assert world_size % sequence_parallel_size == 0 + num_sequence_parallel_groups: int = world_size // sequence_parallel_size + + rank = torch.distributed.get_rank() + + # Build the sequence parallel groups. + global _SEQUENCE_PARALLEL_GROUP + global _SEQUENCE_PARALLEL_RANK + global _SEQUENCE_PARALLEL_SIZE + + assert ( + _SEQUENCE_PARALLEL_GROUP is None + ), 'sequence parallel group is already initialized' + for i in range(num_sequence_parallel_groups): + ranks = range(i * sequence_parallel_size, (i + 1) * sequence_parallel_size) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _SEQUENCE_PARALLEL_GROUP = group + _SEQUENCE_PARALLEL_RANK = ranks.index(rank) + _SEQUENCE_PARALLEL_SIZE = len(ranks) + + if dist.get_rank() == 0: + print("************ Finish sequence pralell group Initialization. 
***********") + # _set_global_memory_buffer() + +def maybe_get_set_global_memory_buffer(q, k, v, m, l, o): + global _PEER_Q, _PEER_K, _PEER_V, _PEER_M, _PEER_L, _PEER_O + if _PEER_Q is None: + try: + if get_sequence_parallel_rank() == 0: + print("Initializing global memoery buffer.") + except: + print("Initializing global memoery buffer.") + _PEER_Q = [torch.empty_like(q) for _ in range(2)] + _PEER_K = [torch.empty_like(k) for _ in range(2)] + _PEER_V = [torch.empty_like(v) for _ in range(2)] + _PEER_M = [torch.empty_like(m) for _ in range(2)] + _PEER_L = [torch.empty_like(l) for _ in range(2)] + _PEER_O = [torch.empty_like(o) for _ in range(2)] + + return _PEER_Q, _PEER_K, _PEER_V, _PEER_M, _PEER_L, _PEER_O + +def maybe_get_set_global_memory_buffer_bwd(dq, dk, dv, q, L, k, v, o, do): + global _DELTA_DQ, _DELTA_DK, _DELTA_DV, _DK_DELTA_FROM_PEER, _DV_DELTA_FROM_PEER,_PEER_Q_BWD, _PEER_L, _PEER_K_BWD, _PEER_V_BWD, _PEER_O_BWD, _PEER_DO + if _DELTA_DQ is None: + try: + if get_sequence_parallel_rank() == 0: + print("Initializing global memoery buffer for backward.") + except: + print("Initializing global memoery buffer for backward.") + _DELTA_DQ = [torch.empty_like(dq) for _ in range(2)] + _DELTA_DK = [torch.empty_like(dk) for _ in range(2)] + _DELTA_DV = [torch.empty_like(dv) for _ in range(2)] + _PEER_L = [torch.empty_like(L) for _ in range(2)] + + _DK_DELTA_FROM_PEER = torch.empty_like(dk) + _DV_DELTA_FROM_PEER = torch.empty_like(dv) + + # may already be initailized in the forward call. + # current forward and backward needs a transpose in q's format + _PEER_Q_BWD = [torch.empty_like(q) for _ in range(2)] + _PEER_K_BWD = [torch.empty_like(k) for _ in range(2)] + _PEER_V_BWD = [torch.empty_like(v) for _ in range(2)] + _PEER_O_BWD = [torch.empty_like(o) for _ in range(2)] + + _PEER_DO = [torch.empty_like(do) for _ in range(2)] + + return _DELTA_DQ, _DELTA_DK, _DELTA_DV, _DK_DELTA_FROM_PEER, _DV_DELTA_FROM_PEER, _PEER_Q_BWD, _PEER_L, _PEER_K_BWD, _PEER_V_BWD, _PEER_O_BWD, _PEER_DO + +def reset_global_memory_buffer(): + global _PEER_Q, _PEER_K, _PEER_V, _PEER_M, _PEER_L, _PEER_O, _DELTA_DQ, _PEER_L, _DELTA_DK, _DELTA_DV, _DK_DELTA_FROM_PEER, _DV_DELTA_FROM_PEER, _PEER_DO + _PEER_Q = None + _PEER_K = None + _PEER_V = None + _PEER_M = None + _PEER_L = None + _PEER_O = None + + _DELTA_DQ = None + _PEER_L = None + _DELTA_DK = None + _DELTA_DV = None + _DK_DELTA_FROM_PEER = None + _DV_DELTA_FROM_PEER = None + _PEER_DO = None + +# Pytorch defers the creation of nccl communicators to the first P2P call, +# We manually create them so the first isend does not hang without an irecv. +# reference: https://github.com/pytorch/pytorch/blob/main/torch/csrc/cuda/nccl.cpp#L138 +# Only support even number of GPUs. 
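+# NOTE (descriptive): initialize_distributed() above currently leaves this warm-up call commented
+# out; when enabled, even ranks exchange a dummy isend/irecv with seq_rank + 1 and odd ranks with
+# seq_rank - 1, so the communicators are created eagerly instead of on the first real P2P call.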
+def create_nccl_communicators(): + seq_rank = get_sequence_parallel_rank() + seq_group = get_sequence_parallel_group() + + empty_tensor = torch.empty(1,).cuda() + empty_tensor_2 = torch.empty(1,).cuda() + if torch.distributed.get_rank() % 2 == 0: + # sender + op1 = P2POp(op=isend, tensor=torch.empty(1,).cuda(), peer=seq_rank+1, group=seq_group) + op2 = P2POp(op=irecv, tensor=torch.empty(1,).cuda(), peer=seq_rank+1, group=seq_group) + #req = torch.distributed.isend(tensor=empty_tensor, dst=seq_rank + 1, group=seq_group) + dist.batch_isend_irecv([op1, op2]) + else: + # receiver + op1 = P2POp(op=irecv, tensor=torch.empty(1,).cuda(), peer=seq_rank-1, group=seq_group) + op2 = P2POp(op=isend, tensor=torch.empty(1,).cuda(), peer=seq_rank-1, group=seq_group) + #req = torch.distributed.isend(tensor=empty_tensor, dst=seq_rank + 1, group=seq_group) + handles = dist.batch_isend_irecv([op1, op2]) + #req = torch.distributed.irecv(tensor=empty_tensor, src=seq_rank - 1, group=seq_group) + dist.all_reduce(empty_tensor, group=seq_group) + +def get_sequence_parallel_group(): + """Get the sequence parallel group the caller rank belongs to.""" + #global _SEQUENCE_PARALLEL_GROUP + assert ( + _SEQUENCE_PARALLEL_GROUP is not None + ), 'sequence parallel group is not initialized' + return _SEQUENCE_PARALLEL_GROUP + +def get_sequence_parallel_rank(): + """Return my rank for the sequence parallel group.""" + global _SEQUENCE_PARALLEL_RANK + if _SEQUENCE_PARALLEL_RANK is not None: + return _SEQUENCE_PARALLEL_RANK + return torch.distributed.get_rank(group=get_sequence_parallel_group()) + +def get_sequence_parallel_size(): + """Return my rank for the sequence parallel group.""" + global _SEQUENCE_PARALLEL_SIZE + if _SEQUENCE_PARALLEL_SIZE is not None: + return _SEQUENCE_PARALLEL_SIZE + return torch.distributed.get_world_size(group=get_sequence_parallel_group()) + +def destroy_sequence_parallel(): + """Set the groups to none.""" + global _SEQUENCE_PARALLEL_GROUP + _SEQUENCE_PARALLEL_GROUP = None + +# whether this is the last time the kernel being called +def is_last_time(time_step): + # e.g. 
on a 8-GPU setup: + # R=0: 0 + # R=1: 1 + # R=2: 2 + # R=3: 3 + # R=4: 4, 5, 6, 7 + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + if seq_rank <= seq_world_size // 2: # no one helps these ranks + rank_finish_time = seq_rank + else: + rank_finish_time = seq_world_size // 2 + return rank_finish_time == time_step + +# Whether the current time step is computing for local q +def is_compute_for_local_query(time_step): + # R=3,4,5,6,7: Yes + # R=0: 0 + # R=1: 0, 1 + # R=2: 0, 1, 2 + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + if seq_rank >= min(seq_world_size // 2, time_step): + return True + return False + +# Whether the current time step is idle +def is_idle(time_step): + # 0, 1, 2, 3: 4 + # 4, 5, 6, 7: No + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + if seq_rank < (seq_world_size // 2) and time_step == seq_world_size // 2: + return True + return False + +# Whether the current time step needs to synchronize with a remote computed result +def is_sync_from_remote(time_step): + # R=0, 1, 2, 3, 4: No + # R=5: 4 + # R=6: 3, 4 + # R=7: 2, 3, 4 + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + if seq_rank > max(seq_world_size // 2, seq_world_size - time_step): + return True + return False + +def maybe_send_recv_fwd_qkvo(q: torch.Tensor, peer_q: torch.Tensor, + k: torch.Tensor, peer_k: torch.Tensor, + v: torch.Tensor, peer_v: torch.Tensor, + o_stats: list,# peer_o_stats: list, + time_step: int, comm_mode, debug=False) -> torch.Tensor: + + seq_group = get_sequence_parallel_group() + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + # Handles for operations that actually need to be wait before going to the next iteration. + # For instance, QKV sender never needs to wait -> it seems fusing these calls help scheduler; + all_handles = [] + # KV logic: different than older version, every rank to send/recv its own kv, + # to balance communication. In a balanced communication, every step each rank + # should send/recv 4 tensors in total (kv, or qo). For instance, rank 0 when + # time step > 0, should send its own kv and send/recv qo. In the older version, + # rank 0 does not send its kv, and rely on a later rank to pass it, where the + # later rank has to (1) receive kv, send rank 0's kv and send/recv qo. + # Q (load balancing) logic: semantically, this will be "%" world size, so + # the same send/recv rank as KV. Note: Only support even number of machines. + # O (load balancing) logic: rank 0 sends result to rank 7 at time 1. + # It get delayed for one time step, and thus has different maybe_send/recv_rank. + # Use (time_step + 1) to easily convert to synchornize version. 
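+    # Worked example (comments only), assuming seq_world_size == 8 and time_step == 1:
+    #   rank 0: maybe_send_rank = 2  -> sends its kv to rank 2;
+    #           maybe_recv_rank = -2 -> wraps to rank 6, receives q from it.
+    #   rank 6: maybe_send_rank = 8  -> wraps to rank 0, sends its q there;
+    #           maybe_recv_rank = 4  -> receives kv from rank 4.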
+ maybe_send_rank = seq_rank + (time_step + 1) + maybe_recv_rank = seq_rank - (time_step + 1) + + if debug: + global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume + _debug_send = _fwd_send_volume + _debug_recv = _fwd_recv_volume + + if maybe_send_rank >= seq_world_size: + #send q, no one needs to do remote computation in the last time step + if time_step < (seq_world_size // 2 - 1): + #print(f"t={time_step}: R={seq_rank} sends q to {maybe_send_rank % seq_world_size} (not wait)") + #q_send_handles.append(P2POp(op=isend, tensor=q, peer=maybe_send_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=q, peer=maybe_send_rank % seq_world_size, group=seq_group)) + if debug: + _fwd_send_volume += torch.numel(q) * q.element_size() + else: + # send kv + #print(f"t={time_step}: R={seq_rank} sends kv to {maybe_send_rank} (not wait)") + #kv_send_handles.append(P2POp(op=isend, tensor=k, peer=maybe_send_rank, group=seq_group)) + #kv_send_handles.append(P2POp(op=isend, tensor=v, peer=maybe_send_rank, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=k, peer=maybe_send_rank, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=v, peer=maybe_send_rank, group=seq_group)) + if debug: + _fwd_send_volume += torch.numel(k) * k.element_size() + _fwd_send_volume += torch.numel(v) * v.element_size() + + if maybe_recv_rank < 0: + # recv q, no one needs to do remote computation in the last time step + if time_step < (seq_world_size // 2 - 1): + # print(f"t={time_step}: R={seq_rank} receives q from {maybe_recv_rank % seq_world_size} (wait)") + #q_recv_handles.append(P2POp(op=irecv, tensor=peer_q, peer=maybe_recv_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_q, peer=maybe_recv_rank % seq_world_size, group=seq_group)) + if debug: + _fwd_recv_volume += torch.numel(peer_q) * peer_q.element_size() + else: + # recv kv + #print(f"t={time_step}: R={seq_rank} receivs kv from {maybe_recv_rank} (wait)") + #kv_recv_handles.append(P2POp(op=irecv, tensor=peer_k, peer=maybe_recv_rank, group=seq_group)) + #kv_recv_handles.append(P2POp(op=irecv, tensor=peer_v, peer=maybe_recv_rank, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_k, peer=maybe_recv_rank, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_v, peer=maybe_recv_rank, group=seq_group)) + if debug: + _fwd_recv_volume += torch.numel(peer_k) * peer_k.element_size() + _fwd_recv_volume += torch.numel(peer_v) * peer_v.element_size() + + maybe_send_rank_o = seq_rank - (time_step - 1) + maybe_recv_rank_o = seq_rank + (time_step - 1) + if maybe_send_rank_o < 0 and time_step > 1: + for t in o_stats: + # print(f"t={time_step}: R={seq_rank} sends o to {maybe_send_rank_o % seq_world_size} (wait)") + #o_send_handles.append(P2POp(op=isend, tensor=t, peer=maybe_send_rank_o % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=t, peer=maybe_send_rank_o % seq_world_size, group=seq_group)) + if debug: + _fwd_send_volume += torch.numel(t) * t.element_size() + if maybe_recv_rank_o >= seq_world_size and time_step > 1 : + for t in o_stats: + # print(f"t={time_step}: R={seq_rank} receives o from {maybe_recv_rank_o % seq_world_size} (wait)") + #o_recv_handles.append(P2POp(op=irecv, tensor=t, peer=maybe_recv_rank_o % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=t, peer=maybe_recv_rank_o % seq_world_size, group=seq_group)) + if debug: + _fwd_recv_volume += torch.numel(t) * t.element_size() 
+ + #reqs = [] + + if debug: + if seq_rank in [0, 8]: + print(f"R={seq_rank} time_step={time_step} increases: send {(_fwd_send_volume - _debug_send) * 1e-9} GB recv {(_fwd_recv_volume - _debug_recv) * 1e-9} GB") + #return reqs + all_reqs = launch_async_handles(all_handles, comm_mode) + return [all_reqs] + +# delta: may be you are using it for your local compute or as a distributed buffer to send to others +# .. Sorry for the bad naming.. +def maybe_send_recv_bwd_qkvo(dq_delta: torch.Tensor, dk_delta: torch.Tensor, + dv_delta: torch.Tensor, dk_delta_from_peer: torch.Tensor, + dv_delta_from_peer: torch.Tensor, q: torch.Tensor, + peer_q: torch.Tensor, L: torch.Tensor, + peer_L: torch.Tensor, k: torch.Tensor, + peer_k: torch.Tensor, v: torch.Tensor, + peer_v: torch.Tensor, o: torch.Tensor, + peer_o: torch.Tensor, do: torch.Tensor, + peer_do: torch.Tensor, time_step: int, comm_mode, debug=False): + + seq_group = get_sequence_parallel_group() + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + all_handles = [] + maybe_send_rank = seq_rank + (time_step + 1) + maybe_recv_rank = seq_rank - (time_step + 1) + + if debug: + global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume + + if maybe_send_rank >= seq_world_size: + #send q, no one needs to do remote computation in the last time step + if time_step < (seq_world_size // 2 - 1): + all_handles.append(P2POp(op=isend, tensor=q, peer=maybe_send_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=L, peer=maybe_send_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=o, peer=maybe_send_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=do, peer=maybe_send_rank % seq_world_size, group=seq_group)) + if debug: + _bwd_send_volume += torch.numel(q) * q.element_size() + _bwd_send_volume += torch.numel(L) * L.element_size() + _bwd_send_volume += torch.numel(o) * o.element_size() + _bwd_send_volume += torch.numel(do) * do.element_size() + else: + # send kv + all_handles.append(P2POp(op=isend, tensor=k, peer=maybe_send_rank, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=v, peer=maybe_send_rank, group=seq_group)) + if debug: + _bwd_send_volume += torch.numel(k) * k.element_size() + _bwd_send_volume += torch.numel(v) * v.element_size() + + if maybe_recv_rank < 0: + # recv q, no one needs to do remote computation in the last time step + if time_step < (seq_world_size // 2 - 1): + all_handles.append(P2POp(op=irecv, tensor=peer_q, peer=maybe_recv_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_L, peer=maybe_recv_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_o, peer=maybe_recv_rank % seq_world_size, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_do, peer=maybe_recv_rank % seq_world_size, group=seq_group)) + if debug: + _bwd_recv_volume += torch.numel(peer_q) * peer_q.element_size() + _bwd_recv_volume += torch.numel(peer_L) * peer_L.element_size() + _bwd_recv_volume += torch.numel(peer_o) * peer_o.element_size() + _bwd_recv_volume += torch.numel(peer_do) * peer_do.element_size() + else: + # recv kv + all_handles.append(P2POp(op=irecv, tensor=peer_k, peer=maybe_recv_rank, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=peer_v, peer=maybe_recv_rank, group=seq_group)) + if debug: + _bwd_recv_volume += torch.numel(peer_k) * peer_k.element_size() + _bwd_recv_volume += 
torch.numel(peer_v) * peer_v.element_size() + + # Whether I should update dq, dk and dv after waiting these requests + is_update_dq = False + is_update_dkv = False + + maybe_send_rank_dqkv = seq_rank - (time_step - 1) + maybe_recv_rank_dqkv = seq_rank + (time_step - 1) + + if time_step > 1: + if maybe_send_rank_dqkv < 0: + #print(f"BWD t={time_step}: R={seq_rank} sends dq delta to {maybe_send_rank_dqkv % seq_world_size}") + all_handles.append(P2POp(op=isend, tensor=dq_delta, peer=maybe_send_rank_dqkv % seq_world_size, group=seq_group)) + if debug: + _bwd_send_volume += torch.numel(dq_delta) * dq_delta.element_size() + else: + #print(f"BWD t={time_step}: R={seq_rank} sends dkv delta to {maybe_send_rank_dqkv}") + all_handles.append(P2POp(op=isend, tensor=dk_delta, peer=maybe_send_rank_dqkv, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=dv_delta, peer=maybe_send_rank_dqkv, group=seq_group)) + if debug: + _bwd_send_volume += torch.numel(dk_delta) * dk_delta.element_size() + _bwd_send_volume += torch.numel(dv_delta) * dv_delta.element_size() + + if maybe_recv_rank_dqkv >= seq_world_size: + #print(f"BWD t={time_step}: R={seq_rank} receives dq delta to {maybe_recv_rank_dqkv % seq_world_size}") + all_handles.append(P2POp(op=irecv, tensor=dq_delta, peer=maybe_recv_rank_dqkv % seq_world_size, group=seq_group)) + is_update_dq = True + if debug: + _bwd_recv_volume += torch.numel(dq_delta) * dq_delta.element_size() + else: + #print(f"BWD t={time_step}: R={seq_rank} receives dk dv delta from {maybe_recv_rank_dqkv}") + all_handles.append(P2POp(op=irecv, tensor=dk_delta_from_peer, peer=maybe_recv_rank_dqkv, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=dv_delta_from_peer, peer=maybe_recv_rank_dqkv, group=seq_group)) + is_update_dkv = True + if debug: + _bwd_recv_volume += torch.numel(dk_delta_from_peer) * dk_delta_from_peer.element_size() + _bwd_recv_volume += torch.numel(dv_delta_from_peer) * dv_delta_from_peer.element_size() + + # return [], is_update_dq, is_update_dkv + all_reqs = launch_async_handles(all_handles, comm_mode) + return [all_reqs], is_update_dq, is_update_dkv + +def maybe_send_recv_bwd_last_dkv(dk_delta: torch.Tensor, dv_delta: torch.Tensor, time_step, comm_mode, debug=False): + is_update_last_dkv = False + + seq_group = get_sequence_parallel_group() + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + if seq_world_size == 1: return [], is_update_last_dkv + + all_handles = [] + + if debug: + global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume + + if time_step == seq_world_size // 2: + maybe_send_rank = seq_rank - time_step + maybe_recv_rank = seq_rank + time_step + + assert (maybe_send_rank >= 0) ^ (maybe_recv_rank < seq_world_size), "R={seq_rank} should be either sending or receiving dkv in the last time step." 
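+        # Worked example (comments only), assuming seq_world_size == 8 (so this runs at time_step == 4):
+        #   ranks 4..7 send their accumulated dk/dv deltas back to rank (r - 4), while
+        #   ranks 0..3 receive them and set is_update_last_dkv so the caller adds them into dk/dv.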
+ + if maybe_send_rank >= 0: + # print(f"BWD t={time_step}: R={seq_rank} last send dkv to {maybe_send_rank}") + all_handles.append(P2POp(op=isend, tensor=dk_delta, peer=maybe_send_rank, group=seq_group)) + all_handles.append(P2POp(op=isend, tensor=dv_delta, peer=maybe_send_rank, group=seq_group)) + if debug: + _bwd_send_volume += torch.numel(dk_delta) * dk_delta.element_size() + _bwd_send_volume += torch.numel(dv_delta) * dv_delta.element_size() + if maybe_recv_rank < seq_world_size: + # print(f"BWD t={time_step}: R={seq_rank} last receive dkv from {maybe_recv_rank}") + all_handles.append(P2POp(op=irecv, tensor=dk_delta, peer=maybe_recv_rank, group=seq_group)) + all_handles.append(P2POp(op=irecv, tensor=dv_delta, peer=maybe_recv_rank, group=seq_group)) + if debug: + _bwd_recv_volume += torch.numel(dk_delta) * dk_delta.element_size() + _bwd_recv_volume += torch.numel(dv_delta) * dv_delta.element_size() + is_update_last_dkv = True + + # return [], is_update_last_dkv + all_reqs = launch_async_handles(all_handles, comm_mode) + + return [all_reqs], is_update_last_dkv + +def print_and_reset_comm_stats(): + seq_rank = get_sequence_parallel_rank() + + global _fwd_send_volume, _fwd_recv_volume, _bwd_send_volume, _bwd_recv_volume + _fwd_send_volume *= 1e-9 + _fwd_recv_volume *= 1e-9 + _bwd_send_volume *= 1e-9 + _bwd_recv_volume *= 1e-9 + + print(f"R={seq_rank} fwd send: {_fwd_send_volume} fwd recv: {_fwd_recv_volume}; bwd send: {_bwd_send_volume}, bwd recv: {_bwd_recv_volume} GB.") + _fwd_send_volume = 0 + _fwd_recv_volume = 0 + _bwd_send_volume = 0 + _bwd_recv_volume = 0 + +def launch_async_handles(handles, comm_mode): + global _args + if comm_mode == "nocomm": + #print("skipping communication for ablation") + return [] + if len(handles) > 0: + return dist.batch_isend_irecv(handles) + return [] + +def wait_async_handles(reqs): + if len(reqs) > 0: + for req in reqs: + for r in req: + r.wait() \ No newline at end of file diff --git a/vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn.py b/vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..d776495bc3a01cd7f0e37c96f45dfe577126cf09 --- /dev/null +++ b/vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn.py @@ -0,0 +1,743 @@ +import os +import math + +from einops import rearrange +import argparse + +import pytest +import torch +import torch.distributed as dist +from torch.distributed import ReduceOp +#from torch.profiler import profile, record_function, ProfilerActivity +import functools +import triton +import triton.language as tl +import time +import numpy as np +from tqdm import tqdm + +try: + from flash_attn.flash_attn_interface import _flash_attn_forward, _flash_attn_backward +except: + pass + +from .async_communication import (is_last_time, is_compute_for_local_query, is_sync_from_remote, is_idle, print_and_reset_comm_stats, + launch_async_handles, wait_async_handles, maybe_send_recv_fwd_qkvo, maybe_send_recv_bwd_qkvo, maybe_send_recv_bwd_last_dkv, reset_global_memory_buffer, + maybe_get_set_global_memory_buffer, maybe_get_set_global_memory_buffer_bwd, initialize_distributed, get_sequence_parallel_size, get_sequence_parallel_rank) + +@triton.jit +def max_fn(x, y): + return tl.math.max(x, y) + +@triton.jit +def _rescale_kernel( + peer_m, + m, + peer_l, + l, + peer_o, + o, + L, + stride_oz, stride_oh, stride_om, stride_on, + Z, H, N_CTX, + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + LAST_STEP: tl.constexpr, +): + 
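+    # Descriptive summary (comments only): this kernel merges a peer rank's partial attention
+    # results (peer_m, peer_l, peer_o) into the local (m, l, o) blocks with online-softmax
+    # rescaling: m_i_sync = max(m, peer_m), each accumulator is rescaled by exp2(its running
+    # max - m_i_sync), the outputs and row sums are combined, and on LAST_STEP the output is
+    # normalized by l and the logsumexp is written to L for the backward pass.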
start_m = tl.program_id(0) + off_hz = tl.program_id(1) + o_offset = off_hz * stride_oh + peer_o_block_ptr = tl.make_block_ptr( + base=peer_o + o_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + o_block_ptr = tl.make_block_ptr( + base=o + o_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + + peer_m_ptrs = peer_m + off_hz * N_CTX + offs_m + m_ptrs = m + off_hz * N_CTX + offs_m + peer_l_ptrs = peer_l + off_hz * N_CTX + offs_m + l_ptrs = l + off_hz * N_CTX + offs_m + + peer_m_i = tl.load(peer_m_ptrs) + peer_m_i = peer_m_i.to(tl.float32) + m_i = tl.load(m_ptrs) + m_i = m_i.to(tl.float32) + peer_l_i = tl.load(peer_l_ptrs) + peer_l_i = peer_l_i.to(tl.float32) + l_i = tl.load(l_ptrs) + l_i = l_i.to(tl.float32) + + peer_acc = tl.load(peer_o_block_ptr) + peer_acc = peer_acc.to(tl.float32) + acc = tl.load(o_block_ptr) + acc = acc.to(tl.float32) + lo = 0 + hi = N_CTX + m_i_sync = tl.maximum(m_i, peer_m_i) + alpha = tl.math.exp2(m_i - m_i_sync) + peer_alpha = tl.math.exp2(peer_m_i - m_i_sync) + # -- scale and update acc -- + acc_scale = l_i * 0 + alpha # workaround some compiler bug + peer_acc_scale = peer_l_i * 0 + peer_alpha # workaround some compiler bug + + acc *= acc_scale[:, None] + peer_acc *= peer_acc_scale[:, None] + acc += peer_acc + l_i = l_i * acc_scale + peer_l_i * peer_acc_scale + # write back O, l, m + tl.store(m_ptrs, m_i_sync) + tl.store(l_ptrs, l_i) + if LAST_STEP: + acc = acc / l_i[:, None] + L_ptrs = L + off_hz * N_CTX + offs_m + tl.store(L_ptrs, m_i_sync / 1.44269504 + tl.math.log(l_i)) + tl.store(o_block_ptr, acc.to(tl.bfloat16)) + +@triton.jit +def _fwd_kernel( + Q, K, V, sm_scale, + m, + l, + O, + L, + stride_qz, stride_qh, stride_qm, stride_qk, + stride_kz, stride_kh, stride_kn, stride_kk, + stride_vz, stride_vh, stride_vk, stride_vn, + stride_oz, stride_oh, stride_om, stride_on, + Z, H, N_CTX, + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + IS_CAUSAL: tl.constexpr, + LAST_STEP: tl.constexpr +): + start_m = tl.program_id(0) + off_hz = tl.program_id(1) + qvk_offset = off_hz * stride_qh + Q_block_ptr = tl.make_block_ptr( + base=Q + qvk_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + K_block_ptr = tl.make_block_ptr( + base=K + qvk_offset, + shape=(BLOCK_DMODEL, N_CTX), + strides=(stride_kk, stride_kn), + offsets=(0, 0), + block_shape=(BLOCK_DMODEL, BLOCK_N), + order=(0, 1) + ) + V_block_ptr = tl.make_block_ptr( + base=V + qvk_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_vk, stride_vn), + offsets=(0, 0), + block_shape=(BLOCK_N, BLOCK_DMODEL), + order=(1, 0) + ) + O_block_ptr = tl.make_block_ptr( + base=O + qvk_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + # initialize pointer to m and l -> load from provided pointer + m_ptrs = m + off_hz * N_CTX + offs_m + l_ptrs = l + off_hz * N_CTX + offs_m + m_i = tl.load(m_ptrs) + m_i = m_i.to(tl.float32) + l_i = 
tl.load(l_ptrs) + l_i = l_i.to(tl.float32) + acc = tl.load(O_block_ptr) + acc = acc.to(tl.float32) + # scale sm_scale by log_2(e) and use + # 2^x instead of exp in the loop because CSE and LICM + # don't work as expected with `exp` in the loop + qk_scale = sm_scale * 1.44269504 + # load q: it will stay in SRAM throughout + q = tl.load(Q_block_ptr) + q = (q * qk_scale).to(tl.bfloat16) + # loop over k, v and update accumulator + lo = 0 + hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX + for start_n in range(lo, hi, BLOCK_N): + # -- load k, v -- + k = tl.load(K_block_ptr) + v = tl.load(V_block_ptr) + # -- compute qk --- + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + if IS_CAUSAL: + qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) + qk += tl.dot(q, k) + # -- compute scaling constant --- + m_i_new = tl.maximum(m_i, tl.max(qk, 1)) + alpha = tl.math.exp2(m_i - m_i_new) + p = tl.math.exp2(qk - m_i_new[:, None]) + # -- scale and update acc -- + acc_scale = l_i * 0 + alpha # workaround some compiler bug + acc *= acc_scale[:, None] + acc += tl.dot(p.to(tl.bfloat16), v) + # -- update m_i and l_i -- + l_i = l_i * alpha + tl.sum(p, 1) + m_i = m_i_new + # update pointers + K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) + V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) + # write back original l and m + tl.store(m_ptrs, m_i) + tl.store(l_ptrs, l_i) + # write back O, L + if LAST_STEP: + acc = acc / l_i[:, None] + L_ptrs = L + off_hz * N_CTX + offs_m + tl.store(L_ptrs, m_i / 1.44269504 + tl.math.log(l_i)) + tl.store(O_block_ptr, acc.to(tl.bfloat16)) + +# for gqa/mqa to expand kv heads +def maybe_repeat_kv_fwd(nqh, kv): + bs, nkvh, slen, hdim = kv.shape + n_rep = nqh // nkvh + if n_rep == 1: + return kv + kv_expand = kv[:, :, None, :, :].expand(bs, nkvh, n_rep, slen, hdim) + return kv_expand.reshape(bs, nkvh * n_rep, slen, hdim) + +def maybe_repeat_kv_bwd(nqh, kv): + bs, slen, nkvh, hdim = kv.shape + n_rep = nqh // nkvh + if n_rep == 1: + return kv + kv_expand = kv[:, :, :, None, :].expand(bs, slen, nkvh, n_rep, hdim) + return kv_expand.reshape(bs, slen, nkvh * n_rep, hdim) + +# kv grad has shape bs, slen, nqh, hdim +def maybe_reduce_dkv(nkvh, dkv): + bs, slen, nqh, hdim = dkv.shape + n_rep = nqh // nkvh + if n_rep == 1: + return dkv + dkv_reshape = dkv.view(bs, slen, nkvh, n_rep, hdim) + return torch.sum(dkv_reshape, dim=3) + + +def _lightseq_forward(q, k, v, causal, sm_scale, comm_mode): + # maybe_contiguous = lambda x: x.contiguous() if x.stride(-1) != 1 else x + # q, k, v = [maybe_contiguous(x) for x in (q, k, v)] + + # shape constraints + Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] + assert Lq == Lk and Lk == Lv + assert Lk in {16, 32, 64, 128} + # Why do I have to change it from 128 64 to 32 32? 
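+    # Descriptive note (comments only): the forward below runs seq_world_size // 2 + 1 time steps,
+    # double-buffering the peer tensors (communication fills buffer time_step % 2 while compute
+    # reads (time_step - 1) % 2) and merging remote partial results with _rescale_kernel whenever
+    # is_sync_from_remote(time_step) is true.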
+ BLOCK_M = 32 + BLOCK_N = 32 + + bsz, nh, seq_len, hdim = q.shape + + m = torch.full((bsz * nh, seq_len), fill_value=-float("inf"), device=q.device, dtype=torch.float32) + l = torch.zeros_like(m) + L = torch.zeros_like(m) + o = torch.zeros_like(q) + + grid = (triton.cdiv(seq_len, BLOCK_M), bsz * nh, 1) + num_warps = 4 if Lk <= 64 else 8 + + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + # Initialize all buffers + peer_q, peer_k, peer_v, peer_m, peer_l, peer_o = maybe_get_set_global_memory_buffer(q, k, v, m, l, o) + + fwd_launch_helper = lambda q, k, v, m, l, o, L, IS_CAUSAL, LAST_STEP: _fwd_kernel[grid]( + q, k, v, sm_scale, + m, + l, + o, + L, + q.stride(0), q.stride(1), q.stride(2), q.stride(3), + k.stride(0), k.stride(1), k.stride(2), k.stride(3), + v.stride(0), v.stride(1), v.stride(2), v.stride(3), + o.stride(0), o.stride(1), o.stride(2), o.stride(3), + q.shape[0], q.shape[1], q.shape[2], + BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk, + IS_CAUSAL=IS_CAUSAL, + LAST_STEP=LAST_STEP, + num_warps=num_warps, + num_stages=4) + + for time_step in range(seq_world_size // 2 + 1): + # This is important for cuda scheduler to execute nccl calls first. + torch.cuda.synchronize() + # Communication uses buffer_idx_1, and compute uses buffer_idx_2, which effectively are contents from the last time step. + buffer_idx_1 = time_step % 2 + buffer_idx_2 = (time_step - 1) % 2 + + reqs = maybe_send_recv_fwd_qkvo(q, peer_q[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1], + [peer_o[buffer_idx_1], peer_m[buffer_idx_1], peer_l[buffer_idx_1]], time_step, comm_mode) + if comm_mode == "sync": + # if seq_rank == 0: + # print("Immediate wait for abalation") + wait_async_handles(reqs) + if is_compute_for_local_query(time_step): + # print(f"t={time_step}: (Comp) R={seq_rank} local compute") + if time_step == 0: + fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), m, l, o, L, True, is_last_time(time_step)) + else: + # if needs to sync from others, do not normalize here + fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], peer_k[buffer_idx_2]), maybe_repeat_kv_fwd(q.shape[1], peer_v[buffer_idx_2]), m, l, o, L, False, not is_sync_from_remote(time_step) and is_last_time(time_step)) + elif is_idle(time_step): + # print(f"t={time_step}: (Comp) R={seq_rank} idle") + pass + else: + # print(f"t={time_step}: (Comp) R={seq_rank} helps other") + peer_m[buffer_idx_2] = torch.full_like(m, fill_value=-float("inf")) + peer_l[buffer_idx_2] = torch.zeros_like(l) + peer_o[buffer_idx_2] = torch.zeros_like(o) + + #print(f"rank 3 q is: {peer_q[buffer_idx_2]}") + fwd_launch_helper(peer_q[buffer_idx_2], maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), peer_m[buffer_idx_2], peer_l[buffer_idx_2], peer_o[buffer_idx_2], None, False, False) + + if comm_mode == "lightseq": + # Make sure tensors for next steps are ready + wait_async_handles(reqs) + # sync between statistics get from other ranks and the local ones + if is_sync_from_remote(time_step): + _rescale_kernel[grid]( + peer_m[buffer_idx_1], + m, + peer_l[buffer_idx_1], + l, + peer_o[buffer_idx_1], + o, + L, + o.stride(0), o.stride(1), o.stride(2), o.stride(3), + o.shape[0], o.shape[1], o.shape[2], + BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk, + LAST_STEP=is_last_time(time_step), + num_warps=num_warps, + num_stages=4) + return q, k, v, o, L + +def _lightseq_backward(do, q, k, v, o, L, sm_scale, comm_mode, backward_engine): + BLOCK = 128 + q, k, v, o, do 
= [rearrange(_x, 'b h s d -> b s h d').contiguous() for _x in [q, k, v, o, do]] + L = rearrange(L, '(b h) s -> b h s', b=q.shape[0]) + + dq = torch.empty_like(q) + dk = torch.empty_like(k) + dv = torch.empty_like(v) + + # maybe gqa + nqh = q.shape[2] + nkvh = k.shape[2] + is_gqa = (nqh > nkvh) + + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + # Initialize all backward buffers + dq_delta, dk_delta, dv_delta, dk_delta_from_peer, dv_delta_from_peer, \ + peer_q, peer_L, peer_k, peer_v, peer_o, peer_do = maybe_get_set_global_memory_buffer_bwd(dq, dk, dv, q, L, k, v, o, do) + + for time_step in range(0, get_sequence_parallel_size() // 2 + 1): + torch.cuda.synchronize() + buffer_idx_1 = time_step % 2 + buffer_idx_2 = (time_step - 1) % 2 + + reqs, is_update_dq, is_update_dkv = maybe_send_recv_bwd_qkvo(dq_delta[buffer_idx_1], dk_delta[buffer_idx_1], dv_delta[buffer_idx_1], dk_delta_from_peer, dv_delta_from_peer, q, peer_q[buffer_idx_1], L, peer_L[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1], o, peer_o[buffer_idx_1], do, peer_do[buffer_idx_1], time_step, comm_mode) + if comm_mode == "sync": + # if seq_rank == 0: + # print("(bwd) Immediate wait for abalation") + wait_async_handles(reqs) + + if is_compute_for_local_query(time_step): + if time_step == 0: + if backward_engine == "flash": + _flash_attn_backward(do, q, k, v, o, L, dq, dk, dv, 0.0, sm_scale, True, (-1,-1), None, False) + else: + inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=xformers.ops.LowerTriangularMask(), p=0, scale=sm_scale) + op_ctx = Context(lse=L, out=o, rng_state=None) + # Let xformers dispatch the correct backend + grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None) + dq = grads.dq + dk, dv = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv) + else: + if backward_engine == "flash": + _flash_attn_backward(do, q, peer_k[buffer_idx_2], peer_v[buffer_idx_2], o, L, dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], 0.0, sm_scale, False, (-1,-1), None, False) + else: + inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], peer_k[buffer_idx_2]), value=maybe_repeat_kv_bwd(q.shape[2], peer_v[buffer_idx_2]), attn_bias=None, p=0, scale=sm_scale) + op_ctx = Context(lse=L, out=o, rng_state=None) + grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None) + dq_delta[buffer_idx_2] = grads.dq + dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv) + dq += dq_delta[buffer_idx_2] + elif is_idle(time_step): + pass + else: + if backward_engine == "flash": + _flash_attn_backward(peer_do[buffer_idx_2], peer_q[buffer_idx_2], k, v, peer_o[buffer_idx_2], peer_L[buffer_idx_2], dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], 0.0, sm_scale, False, (-1,-1), None, False) + else: + inp = Inputs(query=peer_q[buffer_idx_2], key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=None, p=0, scale=sm_scale) + op_ctx = Context(lse=peer_L[buffer_idx_2], out=peer_o[buffer_idx_2], rng_state=None) + grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=peer_do[buffer_idx_2], op=None) + dq_delta[buffer_idx_2] = grads.dq + dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv) + dk += dk_delta[buffer_idx_2] + dv += dv_delta[buffer_idx_2] + + if 
comm_mode == "lightseq": + # Make sure tensors for next steps are ready + wait_async_handles(reqs) + + # The last time step needs to send dk and dv immediately, move it up here to maximize overlap with the following three addition. + reqs, is_update_last_dkv = maybe_send_recv_bwd_last_dkv(dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], time_step, comm_mode) + + if comm_mode == "sync": + # if seq_rank == 0: + # print("(bwd) dkv Immediate wait for abalation") + wait_async_handles(reqs) + # apply dq_delta, dk_delta and dv_delta from remote + if is_update_dq: + dq += dq_delta[buffer_idx_1] + if is_update_dkv: + dk += dk_delta_from_peer + dv += dv_delta_from_peer + + if comm_mode == "lightseq": + wait_async_handles(reqs) + # apply dk_delta and dv_delta to sender + if is_update_last_dkv: + dk += dk_delta[buffer_idx_2] + dv += dv_delta[buffer_idx_2] + + dq, dk, dv = [rearrange(_x, 'b h s d -> b s h d') for _x in [dq, dk, dv]] + return dq, dk, dv + +class _attention(torch.autograd.Function): + @staticmethod + def forward(ctx, q, k, v, causal, sm_scale): + try: + global args + comm_mode = args.comm_mode + backward_engine = args.backward_engine + except: + comm_mode = 'lightseq' + backward_engine = 'flash' + + q, k, v, o, L = _lightseq_forward(q, k, v, causal, sm_scale, comm_mode) + + ctx.save_for_backward(q, k, v, o, L) + ctx.sm_scale = sm_scale + ctx.comm_mode = comm_mode + ctx.backward_engine = backward_engine + return o + + @staticmethod + def backward(ctx, do): + q, k, v, o, L = ctx.saved_tensors + sm_scale = ctx.sm_scale + + dq, dk, dv = _lightseq_backward(do, q, k, v, o, L, sm_scale, ctx.comm_mode, ctx.backward_engine) + return dq, dk, dv, None, None + +attention = _attention.apply + + +#@pytest.mark.parametrize('causal', [False, True]) +#@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)]) +def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.bfloat16): + torch.manual_seed(20) + q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + + rank = dist.get_rank() + world_size = dist.get_world_size() + seq_per_rank = N_CTX // world_size + + sm_scale = 0.5 + dout = torch.randn_like(q) + # reference implementation + M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda")) + p = torch.matmul(q, k.transpose(2, 3)) * sm_scale + assert causal + if causal: + p[:, :, M == 0] = float("-inf") + p = torch.softmax(p.float(), dim=-1).half() + ref_out = torch.matmul(p, v) + ref_out.backward(dout) + ref_dv, v.grad = v.grad.clone(), None + ref_dk, k.grad = k.grad.clone(), None + ref_dq, q.grad = q.grad.clone(), None + + # triton implementation + + a, b, c, d = q.size() + real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + + tri_out = attention(real_q, real_k, real_v, causal, sm_scale).half() + + # compare + assert 
torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward" + print(f" *** rank {rank} passes forward") + tri_out.backward(real_do) + tri_dv, real_v.grad = real_v.grad.clone(), None + tri_dk, real_k.grad = real_k.grad.clone(), None + tri_dq, real_q.grad = real_q.grad.clone(), None + assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq" + assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk" #f" {ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk" + assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv {ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv" + print(f"rank {rank} passes backward") + + +def test_gqa(Z, H, KVH, N_CTX, D_HEAD, causal, dtype=torch.bfloat16): + torch.manual_seed(177) + q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + k = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + v = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + + rank = dist.get_rank() + world_size = dist.get_world_size() + seq_per_rank = N_CTX // world_size + + sm_scale = 0.5 + dout = torch.randn_like(q) + # torch reference implementation + M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda")) + ref_k = maybe_repeat_kv_fwd(q.shape[1], k).clone().detach().requires_grad_(True) + ref_v = maybe_repeat_kv_fwd(q.shape[1], v).clone().detach().requires_grad_(True) + p = torch.matmul(q, ref_k.transpose(2,3)) * sm_scale + assert causal + if causal: + p[:, :, M == 0] = float("-inf") + p = torch.softmax(p.float(), dim=-1).half() + ref_out = torch.matmul(p, ref_v) + ref_out.backward(dout) + ref_dv, v.grad = ref_v.grad.clone(), None + ref_dv = (maybe_reduce_dkv(KVH, ref_dv.transpose(1,2))).transpose(1,2) + ref_dk, k.grad = ref_k.grad.clone(), None + ref_dk = (maybe_reduce_dkv(KVH, ref_dk.transpose(1,2))).transpose(1,2) + ref_dq, q.grad = q.grad.clone(), None + + # flash reference + from flash_attn import flash_attn_qkvpacked_func, flash_attn_func + flash_q = q.transpose(1,2).clone().detach().requires_grad_(True) + flash_k = k.transpose(1,2).clone().detach().requires_grad_(True) + flash_v = v.transpose(1,2).clone().detach().requires_grad_(True) + flash_ref_out = flash_attn_func(flash_q, flash_k, flash_v, 0, sm_scale, True) + flash_ref_out.backward(dout.transpose(1,2)) + flash_ref_out = flash_ref_out.transpose(1,2) + flash_ref_dv, v.grad = flash_v.grad.clone(), None + flash_ref_dv = flash_ref_dv.transpose(1,2) + flash_ref_dk, k.grad = flash_k.grad.clone(), None + flash_ref_dk = flash_ref_dk.transpose(1,2) + flash_ref_dq, q.grad = flash_q.grad.clone(), None + flash_ref_dq = flash_ref_dq.transpose(1,2) + + # triton implementation + + a, b, c, d = q.size() + real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_k = k[:,:, 
rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True) + real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True) + real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + + tri_out = attention(real_q, real_k, real_v, causal, sm_scale).half() + + # compare + assert torch.allclose(flash_ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward against flash" + print(f" *** rank {rank} passes forward") + tri_out.backward(real_do) + tri_dv, real_v.grad = real_v.grad.clone(), None + tri_dk, real_k.grad = real_k.grad.clone(), None + tri_dq, real_q.grad = real_q.grad.clone(), None + assert torch.allclose(flash_ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq against flash" + #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape) + assert torch.allclose(flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk against flash {flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk" + assert torch.allclose(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv against flash {flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv" + print(f"rank {rank} passes backward against flash") + + assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward" + print(f" *** rank {rank} passes forward") + assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq" + #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape) + assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk {ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk" + assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv {ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv" + print(f"rank {rank} passes backward") + +#BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64 +try: + from flash_attn.flash_attn_interface import \ + flash_attn_qkvpacked_func as flash_attn_func + FLASH_VER = 2 +except BaseException: + try: + from flash_attn.flash_attn_interface import flash_attn_func + FLASH_VER = 1 + except BaseException: + FLASH_VER = None +HAS_FLASH = FLASH_VER is not None +HAS_FLASH = None +ONLY_FLASH = False + +#BATCH, N_HEADS, N_CTX, 
D_HEAD = 4, 48, 4096, 64 +BATCH, N_HEADS, N_CTX, D_HEAD = 1, 32, None, 128 +# vary seq length for fixed head and batch=4 +configs = [triton.testing.Benchmark( + x_names=['N_CTX'], + x_vals=[2**i for i in range(18, 19)],#[ 20, 21]],#[10, 11, 12, 13, 14, 15, 16, 17, 18]], + line_arg='provider', + line_vals=['triton'] if not ONLY_FLASH else [] + (['flash'] if HAS_FLASH else []), + line_names=['Triton'] if not ONLY_FLASH else [] + ([f'Flash-{FLASH_VER}'] if HAS_FLASH else []), + styles=[('red', '-'), ('blue', '-')], + ylabel='ms', + plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}-{causal}', + args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.bfloat16, 'mode': mode, 'causal': causal} +) for mode in ["all"] for causal in [True]] + +# @triton.testing.perf_report(configs) +def bench_flash_attention(BATCH, H, KVH, N_CTX, D_HEAD, causal, mode, provider, args, dtype=torch.bfloat16, device="cuda"): + assert mode == "all" #mode in ['fwd', 'bwd'] + n_warmup = 10 + n_repeat = 10 + cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda') + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + if provider == "triton": + q = torch.randn((BATCH, H, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True) + k = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True) + v = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True) + if seq_rank == 0: + print(f"Benchmarking per GPU qkv shape: {q.shape}") + sm_scale = 1.3 + fwd_fn = lambda: attention(q, k, v, causal, sm_scale) + if provider == "flash": + qkv = torch.randn((BATCH, N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True) + if FLASH_VER == 1: + lengths = torch.full((BATCH,), fill_value=N_CTX, device=device) + cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32) + cu_seqlens[1:] = lengths.cumsum(0) + qkv = qkv.reshape(BATCH * N_CTX, 3, H, D_HEAD) + fwd_fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal) + elif FLASH_VER == 2: + fwd_fn = lambda: flash_attn_func(qkv, causal=causal) + else: + raise ValueError(f'unknown {FLASH_VER = }') + + flops_per_matmul = 2. 
* BATCH * H * N_CTX * N_CTX * D_HEAD / seq_world_size + attn_flops = 2 * flops_per_matmul + + assert causal + if causal: + attn_flops *= 0.5 + fwd_flops = attn_flops + bwd_flops = attn_flops * 2.5 # 2.0(bwd) + 0.5(recompute) + + o = fwd_fn() + do = torch.randn_like(o) + bwd_fn = lambda: o.backward(do, retain_graph=True) + + def run_benchmark(fn): + time_list = [] + for _ in tqdm(range(n_warmup)): + cache.zero_() + fn() + torch.cuda.synchronize() + if args.debug: + print_and_reset_comm_stats() + for i in tqdm(range(n_repeat)): + cache.zero_() + torch.cuda.synchronize() + time_s = time.time() + fn() + torch.cuda.synchronize() + time_e = time.time() + time_list.append((time_e - time_s) * 1000.0) + if args.debug: + print_and_reset_comm_stats() + return np.asarray(time_list) + + fwd_time_arr = run_benchmark(fwd_fn) + bwd_time_arr = run_benchmark(bwd_fn) + + fwd_flops_ps = fwd_flops / np.mean(fwd_time_arr) * 1e-9 + print(f"(FWD) R={seq_rank} avg: {np.mean(fwd_time_arr)}, std: {np.std(fwd_time_arr)} flops: {fwd_flops_ps} \n") + + bwd_flops_ps = bwd_flops / np.mean(bwd_time_arr) * 1e-9 + print(f"(BWD) R={seq_rank} avg: {np.mean(bwd_time_arr)}, std: {np.std(bwd_time_arr)} flops: {bwd_flops_ps} \n") + + # total + total_time_arr = fwd_time_arr + bwd_time_arr + total_flops = fwd_flops + bwd_flops + total_flops_ps = total_flops / np.mean(total_time_arr) * 1e-9 + print(f"(Total) R={seq_rank} avg: {np.mean(total_time_arr)}, std: {np.std(total_time_arr)} flops: {total_flops_ps} \n") + + #return total_flops_ps + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--comm-mode", type=str, default="lightseq") + parser.add_argument("--debug", action="store_true") + parser.add_argument("--run-mode", type=str, default="benchmark") + parser.add_argument("--bs", type=int, default=1) + parser.add_argument("--n_heads", type=int, default=32) + parser.add_argument("--n_kvheads", type=int, default=32) + parser.add_argument("--d_head", type=int, default=128) + parser.add_argument("--start_ctx", type=int, default=12) + parser.add_argument("--end_ctx", type=int, default=18) + parser.add_argument("--forward_engine", type=str, default="triton") + parser.add_argument("--backward_engine", type=str, default="flash") + + global args + args = parser.parse_args() + initialize_distributed() + + assert args.forward_engine == "triton", "Only triton forward is implmented." + assert args.backward_engine in ["flash", "xformers"], "Only flash or xformers backward is implemented." + + if args.backward_engine == "flash": + from flash_attn.flash_attn_interface import _flash_attn_forward, _flash_attn_backward + else: + try: + import xformers.ops + from xformers.ops.fmha.common import Inputs, Context + from xformers.ops.fmha import _memory_efficient_attention_backward + from xformers.ops.fmha import cutlass, flash + except ImportError: + print("xformers not found! 
Please install it before trying to use it.") + + if args.run_mode == "benchmark": + for N_CTX in [2**i for i in range(args.start_ctx, args.end_ctx)]: + bench_flash_attention(args.bs, args.n_heads, args.n_kvheads, N_CTX, args.d_head, True, "all", "triton", args)#.run(save_path='.', print_data=True) + reset_global_memory_buffer() + else: + assert args.run_mode == "test" + for N_CTX in [2048, 4096]: + test_op(1, 16, N_CTX, 128, True) + #test_gqa(1, 16, 8, N_CTX, 128, True) + reset_global_memory_buffer() diff --git a/vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn_varlen.py b/vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn_varlen.py new file mode 100644 index 0000000000000000000000000000000000000000..388ecd4c81d70f41754e3f4af594f66575db281e --- /dev/null +++ b/vision_niah_d/easy_context/dist_flash_attn/lightseq_async_attn_varlen.py @@ -0,0 +1,772 @@ +import os +import math + +from einops import rearrange +import argparse + +import pytest +import torch +import torch.distributed as dist +from torch.distributed import ReduceOp +#from torch.profiler import profile, record_function, ProfilerActivity + +import triton +import triton.language as tl +import time +import numpy as np +from tqdm import tqdm + +try: + from flash_attn.flash_attn_interface import _flash_attn_varlen_backward +except: + pass + +from .async_communication import (is_last_time, is_compute_for_local_query, is_sync_from_remote, is_idle, print_and_reset_comm_stats, + launch_async_handles, wait_async_handles, maybe_send_recv_fwd_qkvo, maybe_send_recv_bwd_qkvo, maybe_send_recv_bwd_last_dkv, reset_global_memory_buffer, + maybe_get_set_global_memory_buffer, maybe_get_set_global_memory_buffer_bwd, initialize_distributed, get_sequence_parallel_size, get_sequence_parallel_rank) + +@triton.jit +def max_fn(x, y): + return tl.math.max(x, y) + +@triton.jit +def _rescale_kernel( + peer_m, + m, + peer_l, + l, + peer_o, + o, + L, + stride_oz, stride_oh, stride_om, stride_on, + Z, H, N_CTX, + seqlen_q_rounded, seqlen_peer_q_rounded, + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + LAST_STEP: tl.constexpr, +): + start_m = tl.program_id(0) + off_hz = tl.program_id(1) + o_offset = off_hz * stride_oh + peer_o_block_ptr = tl.make_block_ptr( + base=peer_o + o_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + o_block_ptr = tl.make_block_ptr( + base=o + o_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + + peer_m_ptrs = peer_m + off_hz * seqlen_peer_q_rounded + offs_m + m_ptrs = m + off_hz * seqlen_q_rounded + offs_m + peer_l_ptrs = peer_l + off_hz * seqlen_peer_q_rounded + offs_m + l_ptrs = l + off_hz * seqlen_q_rounded + offs_m + + peer_m_i = tl.load(peer_m_ptrs) + peer_m_i = peer_m_i.to(tl.float32) + m_i = tl.load(m_ptrs) + m_i = m_i.to(tl.float32) + peer_l_i = tl.load(peer_l_ptrs) + peer_l_i = peer_l_i.to(tl.float32) + l_i = tl.load(l_ptrs) + l_i = l_i.to(tl.float32) + + peer_acc = tl.load(peer_o_block_ptr)#, boundary_check=(0, 1), padding_option='zero') + peer_acc = peer_acc.to(tl.float32) + acc = tl.load(o_block_ptr) #, boundary_check=(0, 1), padding_option='zero') + acc = acc.to(tl.float32) + lo = 0 + hi = N_CTX + m_i_sync = tl.maximum(m_i, peer_m_i) + 
alpha = tl.math.exp2(m_i - m_i_sync) + peer_alpha = tl.math.exp2(peer_m_i - m_i_sync) + # -- scale and update acc -- + acc_scale = l_i * 0 + alpha # workaround some compiler bug + peer_acc_scale = peer_l_i * 0 + peer_alpha # workaround some compiler bug + + acc *= acc_scale[:, None] + peer_acc *= peer_acc_scale[:, None] + acc += peer_acc + l_i = l_i * acc_scale + peer_l_i * peer_acc_scale + # write back O, l, m + tl.store(m_ptrs, m_i_sync) + tl.store(l_ptrs, l_i) + if LAST_STEP: + acc = acc / l_i[:, None] + L_ptrs = L + off_hz * N_CTX + offs_m + tl.store(L_ptrs, m_i_sync / 1.44269504 + tl.math.log(l_i)) + tl.store(o_block_ptr, acc.to(tl.bfloat16), boundary_check=(0, 1)) + +@triton.jit +def _fwd_kernel( + Q, K, V, sm_scale, + m, + l, + O, + L, + stride_qz, stride_qh, stride_qm, stride_qk, + stride_kz, stride_kh, stride_kn, stride_kk, + stride_vz, stride_vh, stride_vk, stride_vn, + stride_oz, stride_oh, stride_om, stride_on, + Z, H, N_CTX, + seqlen_q_rounded, + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + IS_CAUSAL: tl.constexpr, + LAST_STEP: tl.constexpr +): + start_m = tl.program_id(0) + off_hz = tl.program_id(1) + qvk_offset = off_hz * stride_qh + Q_block_ptr = tl.make_block_ptr( + base=Q + qvk_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + K_block_ptr = tl.make_block_ptr( + base=K + qvk_offset, + shape=(BLOCK_DMODEL, N_CTX), + strides=(stride_kk, stride_kn), + offsets=(0, 0), + block_shape=(BLOCK_DMODEL, BLOCK_N), + order=(0, 1) + ) + V_block_ptr = tl.make_block_ptr( + base=V + qvk_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_vk, stride_vn), + offsets=(0, 0), + block_shape=(BLOCK_N, BLOCK_DMODEL), + order=(1, 0) + ) + O_block_ptr = tl.make_block_ptr( + base=O + qvk_offset, + shape=(N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0) + ) + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + # initialize pointer to m and l -> load from provided pointer + # (TODO): Why float32? 
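+    # NOTE: m holds the running row-wise max and l the running sum of exponentials for the online softmax. Both are allocated once per forward in _lightseq_forward_varlen and re-loaded here on every LightSeq time step, so partial statistics from previously processed KV shards carry over between kernel launches; keeping them in float32 preserves accuracy when accumulating exp2 terms computed from bf16 inputs.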
+ m_ptrs = m + off_hz * seqlen_q_rounded + offs_m + l_ptrs = l + off_hz * seqlen_q_rounded + offs_m + m_i = tl.load(m_ptrs) + m_i = m_i.to(tl.float32) + l_i = tl.load(l_ptrs) + l_i = l_i.to(tl.float32) + acc = tl.load(O_block_ptr) + acc = acc.to(tl.float32) + # scale sm_scale by log_2(e) and use + # 2^x instead of exp in the loop because CSE and LICM + # don't work as expected with `exp` in the loop + qk_scale = sm_scale * 1.44269504 + # load q: it will stay in SRAM throughout + q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero') + q = (q * qk_scale).to(tl.bfloat16) + # loop over k, v and update accumulator + lo = 0 + hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX + for start_n in range(lo, hi, BLOCK_N): + # -- load k, v -- + k = tl.load(K_block_ptr, boundary_check=(1,), padding_option='zero') + v = tl.load(V_block_ptr, boundary_check=(0,), padding_option='zero') + # -- compute qk --- + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + if IS_CAUSAL: + qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) + qk += tl.dot(q, k) + # -- compute scaling constant --- + m_i_new = tl.maximum(m_i, tl.max(qk, 1)) + alpha = tl.math.exp2(m_i - m_i_new) + p = tl.math.exp2(qk - m_i_new[:, None]) + # -- scale and update acc -- + acc_scale = l_i * 0 + alpha # workaround some compiler bug + acc *= acc_scale[:, None] + acc += tl.dot(p.to(tl.bfloat16), v) + # -- update m_i and l_i -- + l_i = l_i * alpha + tl.sum(p, 1) + m_i = m_i_new + # update pointers + K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) + V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) + # write back original l and m + tl.store(m_ptrs, m_i) + tl.store(l_ptrs, l_i) + # write back O, L + if LAST_STEP: + acc = acc / l_i[:, None] + L_ptrs = L + off_hz * seqlen_q_rounded + offs_m + tl.store(L_ptrs, m_i / 1.44269504 + tl.math.log(l_i)) + tl.store(O_block_ptr, acc.to(tl.bfloat16), boundary_check=(0, 1)) + +# for gqa/mqa to expand kv heads +def maybe_repeat_kv_fwd(nqh, kv): + bs, nkvh, slen, hdim = kv.shape + n_rep = nqh // nkvh + if n_rep == 1: + return kv + kv_expand = kv[:, :, None, :, :].expand(bs, nkvh, n_rep, slen, hdim) + return kv_expand.reshape(bs, nkvh * n_rep, slen, hdim) + +def maybe_repeat_kv_bwd(nqh, kv): + bs, slen, nkvh, hdim = kv.shape + n_rep = nqh // nkvh + if n_rep == 1: + return kv + kv_expand = kv[:, :, :, None, :].expand(bs, slen, nkvh, n_rep, hdim) + return kv_expand.reshape(bs, slen, nkvh * n_rep, hdim) + +# kv grad has shape bs, slen, nqh, hdim +def maybe_reduce_dkv(nkvh, dkv): + bs, slen, nqh, hdim = dkv.shape + n_rep = nqh // nkvh + if n_rep == 1: + return dkv + #print("*"*100, dkv.shape, bs, slen, nkvh, n_rep, hdim) + dkv_reshape = dkv.view(bs, slen, nkvh, n_rep, hdim) + #print("-"*100, dkv_reshape.shape, bs, slen, nkvh, n_rep, hdim) + return torch.sum(dkv_reshape, dim=3) + + +def _lightseq_forward_varlen(q, k, v, causal, sm_scale, comm_mode): + # maybe_contiguous = lambda x: x.contiguous() if x.stride(-1) != 1 else x + # q, k, v = [maybe_contiguous(x) for x in (q, k, v)] + + # shape constraints + Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] + # assert Lq == Lk and Lk == Lv + # assert Lk in {16, 32, 64, 128} + BLOCK_M = 128 + BLOCK_N = 64 + + bsz, nh, unpadded_seq_len, hdim = q.shape + cu_seq_lens = torch.arange(0, (bsz+1) * unpadded_seq_len, unpadded_seq_len, dtype=torch.int32, device=q.device) + max_seqlen = unpadded_seq_len + seqlen_q_rounded = math.ceil(q.shape[2] / BLOCK_M) * BLOCK_M + + m = torch.full((bsz * nh, seqlen_q_rounded), 
fill_value=-float("inf"), device=q.device, dtype=torch.float32) + l = torch.zeros((bsz * nh, seqlen_q_rounded), device=q.device, dtype=torch.float32) + L = torch.zeros((bsz * nh, seqlen_q_rounded), device=q.device, dtype=torch.float32) + o = torch.zeros_like(q) + + grid = (triton.cdiv(q.shape[2], BLOCK_M), bsz * nh, 1) + num_warps = 4 if Lk <= 64 else 8 + + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + # Initialize all buffers + peer_q, peer_k, peer_v, peer_m, peer_l, peer_o = maybe_get_set_global_memory_buffer(q, k, v, m, l, o) + + fwd_launch_helper = lambda q, k, v, m, l, o, L, IS_CAUSAL, LAST_STEP: _fwd_kernel[grid]( + q, k, v, sm_scale, + m, + l, + o, + L, + q.stride(0), q.stride(1), q.stride(2), q.stride(3), + k.stride(0), k.stride(1), k.stride(2), k.stride(3), + v.stride(0), v.stride(1), v.stride(2), v.stride(3), + o.stride(0), o.stride(1), o.stride(2), o.stride(3), + q.shape[0], q.shape[1], q.shape[2], + seqlen_q_rounded, + BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk, + IS_CAUSAL=IS_CAUSAL, + LAST_STEP=LAST_STEP, + num_warps=num_warps, + num_stages=4) + + for time_step in range(seq_world_size // 2 + 1): + # This is important for cuda scheduler to execute nccl calls first. + torch.cuda.synchronize() + # Communication uses buffer_idx_1, and compute uses buffer_idx_2, which effectively are contents from the last time step. + buffer_idx_1 = time_step % 2 + buffer_idx_2 = (time_step - 1) % 2 + + reqs = maybe_send_recv_fwd_qkvo(q, peer_q[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1], + [peer_o[buffer_idx_1], peer_m[buffer_idx_1], peer_l[buffer_idx_1]], time_step, comm_mode) + if comm_mode == "sync": + # if seq_rank == 0: + # print("Immediate wait for abalation") + wait_async_handles(reqs) + if is_compute_for_local_query(time_step): + # print(f"t={time_step}: (Comp) R={seq_rank} local compute") + if time_step == 0: + fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), m, l, o, L, True, is_last_time(time_step)) + else: + # if needs to sync from others, do not normalize here + fwd_launch_helper(q, maybe_repeat_kv_fwd(q.shape[1], peer_k[buffer_idx_2]), maybe_repeat_kv_fwd(q.shape[1], peer_v[buffer_idx_2]), m, l, o, L, False, not is_sync_from_remote(time_step) and is_last_time(time_step)) + elif is_idle(time_step): + # print(f"t={time_step}: (Comp) R={seq_rank} idle") + pass + else: + # print(f"t={time_step}: (Comp) R={seq_rank} helps other") + peer_m[buffer_idx_2] = torch.full_like(m, fill_value=-float("inf")) + peer_l[buffer_idx_2] = torch.zeros_like(l) + peer_o[buffer_idx_2] = torch.zeros_like(o) + + #print(f"rank 3 q is: {peer_q[buffer_idx_2]}") + fwd_launch_helper(peer_q[buffer_idx_2], maybe_repeat_kv_fwd(q.shape[1], k), maybe_repeat_kv_fwd(q.shape[1], v), peer_m[buffer_idx_2], peer_l[buffer_idx_2], peer_o[buffer_idx_2], None, False, False) + + if comm_mode == "lightseq": + # Make sure tensors for next steps are ready + wait_async_handles(reqs) + # sync between statistics get from other ranks and the local ones + if is_sync_from_remote(time_step): +# print(f"t={time_step}: (Comp) R={seq_rank} sync with other - last time: {is_last_time(time_step)}") + seqlen_peer_q_rounded = peer_l[buffer_idx_1].shape[-1] + _rescale_kernel[grid]( + peer_m[buffer_idx_1], + m, + peer_l[buffer_idx_1], + l, + peer_o[buffer_idx_1], + o, + L, + o.stride(0), o.stride(1), o.stride(2), o.stride(3), + o.shape[0], o.shape[1], o.shape[2], + seqlen_q_rounded, seqlen_peer_q_rounded, + BLOCK_M=BLOCK_M, 
BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk, + LAST_STEP=is_last_time(time_step), + num_warps=num_warps, + num_stages=4) + return q, k, v, o, L, cu_seq_lens, max_seqlen + +def _lightseq_backward_varlen(do, q, k, v, o, L, sm_scale, comm_mode, backward_engine, cu_seq_lens, max_seqlen): + BLOCK = 128 + L = rearrange(L[:, :max_seqlen].contiguous(), '(b h) s -> b h s', b=q.shape[0]) + q, k, v, o, do = [rearrange(_x, 'b h s d -> (b s) h d').contiguous() for _x in [q, k, v, o, do]] + + dq = torch.empty_like(q) + dk = torch.empty_like(k) + dv = torch.empty_like(v) + + # maybe gqa + nqh = q.shape[1] + nkvh = k.shape[1] + is_gqa = (nqh > nkvh) + + seq_rank = get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + + # Initialize all backward buffers + dq_delta, dk_delta, dv_delta, dk_delta_from_peer, dv_delta_from_peer, \ + peer_q, peer_L, peer_k, peer_v, peer_o, peer_do = maybe_get_set_global_memory_buffer_bwd(dq, dk, dv, q, L, k, v, o, do) + + for time_step in range(0, get_sequence_parallel_size() // 2 + 1): + torch.cuda.synchronize() + buffer_idx_1 = time_step % 2 + buffer_idx_2 = (time_step - 1) % 2 + + reqs, is_update_dq, is_update_dkv = maybe_send_recv_bwd_qkvo(dq_delta[buffer_idx_1], dk_delta[buffer_idx_1], dv_delta[buffer_idx_1], dk_delta_from_peer, dv_delta_from_peer, q, peer_q[buffer_idx_1], L, peer_L[buffer_idx_1], k, peer_k[buffer_idx_1], v, peer_v[buffer_idx_1], o, peer_o[buffer_idx_1], do, peer_do[buffer_idx_1], time_step, comm_mode) + if comm_mode == "sync": + wait_async_handles(reqs) + + if is_compute_for_local_query(time_step): + if time_step == 0: + assert backward_engine == "flash", "We haven't supportted varlen feature in xformer" + if backward_engine == "flash": + _flash_attn_varlen_backward(do, q, k, v, o, L, dq, dk, dv, cu_seq_lens, cu_seq_lens, max_seqlen, max_seqlen, 0.0, sm_scale, True, None) + else: + inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=xformers.ops.LowerTriangularMask(), p=0, scale=sm_scale) + op_ctx = Context(lse=L, out=o, rng_state=None) + # Let xformers dispatch the correct backend + grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None) + dq = grads.dq + dk, dv = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv) + else: + assert backward_engine == "flash", "We haven't supportted varlen feature in xformer" + if backward_engine == "flash": + _flash_attn_varlen_backward(do, q, peer_k[buffer_idx_2], peer_v[buffer_idx_2], o, L, dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], cu_seq_lens, cu_seq_lens, max_seqlen, max_seqlen, 0.0, sm_scale, False, None) + else: + inp = Inputs(query=q, key=maybe_repeat_kv_bwd(q.shape[2], peer_k[buffer_idx_2]), value=maybe_repeat_kv_bwd(q.shape[2], peer_v[buffer_idx_2]), attn_bias=None, p=0, scale=sm_scale) + op_ctx = Context(lse=L, out=o, rng_state=None) + grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=do, op=None) + dq_delta[buffer_idx_2] = grads.dq + dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv) + dq += dq_delta[buffer_idx_2] + elif is_idle(time_step): + # print(f"BWD t={time_step}: (Comp) R={seq_rank} idle") + pass + else: + # print(f"BWD t={time_step}: (Comp) R={seq_rank} helps other") + assert backward_engine == "flash", "We haven't supportted varlen feature in xformer" + if backward_engine == "flash": + _flash_attn_varlen_backward(peer_do[buffer_idx_2], peer_q[buffer_idx_2], k, v, 
peer_o[buffer_idx_2], peer_L[buffer_idx_2], dq_delta[buffer_idx_2], dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], cu_seq_lens, cu_seq_lens, max_seqlen, max_seqlen, 0.0, sm_scale, False, None) + else: + inp = Inputs(query=peer_q[buffer_idx_2], key=maybe_repeat_kv_bwd(q.shape[2], k), value=maybe_repeat_kv_bwd(q.shape[2], v), attn_bias=None, p=0, scale=sm_scale) + op_ctx = Context(lse=peer_L[buffer_idx_2], out=peer_o[buffer_idx_2], rng_state=None) + grads = _memory_efficient_attention_backward(ctx=op_ctx, inp=inp, grad=peer_do[buffer_idx_2], op=None) + dq_delta[buffer_idx_2] = grads.dq + dk_delta[buffer_idx_2], dv_delta[buffer_idx_2] = maybe_reduce_dkv(nkvh, grads.dk), maybe_reduce_dkv(nkvh, grads.dv) + dk += dk_delta[buffer_idx_2] + dv += dv_delta[buffer_idx_2] + + if comm_mode == "lightseq": + # Make sure tensors for next steps are ready + wait_async_handles(reqs) + + # The last time step needs to send dk and dv immediately, move it up here to maximize overlap with the following three addition. + reqs, is_update_last_dkv = maybe_send_recv_bwd_last_dkv(dk_delta[buffer_idx_2], dv_delta[buffer_idx_2], time_step, comm_mode) + + if comm_mode == "sync": + # if seq_rank == 0: + # print("(bwd) dkv Immediate wait for abalation") + wait_async_handles(reqs) + # apply dq_delta, dk_delta and dv_delta from remote + if is_update_dq: + dq += dq_delta[buffer_idx_1] + if is_update_dkv: + dk += dk_delta_from_peer + dv += dv_delta_from_peer + + if comm_mode == "lightseq": + wait_async_handles(reqs) + # apply dk_delta and dv_delta to sender + if is_update_last_dkv: + dk += dk_delta[buffer_idx_2] + dv += dv_delta[buffer_idx_2] + + dq, dk, dv = [rearrange(_x, '(b s) h d -> b h s d', s=max_seqlen) for _x in [dq, dk, dv]] + return dq, dk, dv + +class _attention_varlen(torch.autograd.Function): + @staticmethod + def forward(ctx, q, k, v, causal, sm_scale): + try: + global args + comm_mode = args.comm_mode + backward_engine = args.backward_engine + except: + comm_mode = 'lightseq' + backward_engine = 'flash' + + q, k, v, o, L, cu_seq_lens, max_seqlen = _lightseq_forward_varlen(q, k, v, causal, sm_scale, comm_mode) + + ctx.save_for_backward(q, k, v, o, L, cu_seq_lens) + ctx.max_seqlen = max_seqlen + ctx.sm_scale = sm_scale + ctx.comm_mode = comm_mode + ctx.backward_engine = backward_engine + return o + + @staticmethod + def backward(ctx, do): + q, k, v, o, L, cu_seq_lens = ctx.saved_tensors + sm_scale = ctx.sm_scale + max_seqlen = ctx.max_seqlen + + dq, dk, dv = _lightseq_backward_varlen(do, q, k, v, o, L, sm_scale, ctx.comm_mode, ctx.backward_engine, cu_seq_lens, max_seqlen) + return dq, dk, dv, None, None + +dist_attn_varlen = _attention_varlen.apply + + +#@pytest.mark.parametrize('causal', [False, True]) +#@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)]) +def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.bfloat16): + torch.manual_seed(20) + rank = dist.get_rank() + world_size = dist.get_world_size() + + + PAD = world_size * 256 + seq_per_rank = (N_CTX-PAD) // world_size + q = torch.empty((Z, H, N_CTX-PAD, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + k = torch.empty((Z, H, N_CTX-PAD, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + v = torch.empty((Z, H, N_CTX-PAD, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + + # DEBUG: mask out + #mask = torch.zeros(Z, H, seq_per_rank * (world_size - 1), D_HEAD).cuda() + #mask_2 = torch.ones(Z, H, seq_per_rank, D_HEAD).cuda() + #mask = torch.cat((mask, 
mask_2), dim=-2).to(dtype) + #q = mask * q + #k = mask * k + #v = mask * v + + sm_scale = 0.5 + dout = torch.randn_like(q) + # reference implementation + M = torch.tril(torch.ones((N_CTX-PAD, N_CTX-PAD), device="cuda")) + p = torch.matmul(q, k.transpose(2, 3)) * sm_scale + assert causal + if causal: + p[:, :, M == 0] = float("-inf") + p = torch.softmax(p.float(), dim=-1).half() + ref_out = torch.matmul(p, v) + ref_out.backward(dout) + ref_dv, v.grad = v.grad.clone(), None + ref_dk, k.grad = k.grad.clone(), None + ref_dq, q.grad = q.grad.clone(), None + + # triton implementation + + a, b, c, d = q.size() + real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + + tri_out = dist_attn_varlen(real_q, real_k, real_v, causal, sm_scale).half() + + # compare + assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward" + print(f" *** rank {rank} passes forward") + tri_out.backward(real_do) + tri_dv, real_v.grad = real_v.grad.clone(), None + tri_dk, real_k.grad = real_k.grad.clone(), None + tri_dq, real_q.grad = real_q.grad.clone(), None + assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f"rank {rank} fails backward dq" #{ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dq} {torch.max(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dq)} rank {rank} fails backward dk" + assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk" #{ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk" + assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv" #{ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv" + print(f"rank {rank} passes backward") + +#TODO(High Priority): Investigate why rank 0 tends to have larger numerical difference. 
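(test_op above and test_gqa below both build full-sequence reference tensors on every rank and then feed the distributed kernel only that rank's contiguous slice of the sequence dimension. A minimal, self-contained sketch of that sharding step, with toy shapes chosen purely for illustration and not part of the patch itself:)
import torch

def shard_sequence(x: torch.Tensor, rank: int, world_size: int) -> torch.Tensor:
    # x has shape (batch, heads, seq, head_dim); keep this rank's contiguous
    # chunk of the sequence axis, mirroring how the tests slice q, k, v and dout.
    seq_per_rank = x.shape[2] // world_size
    return x[:, :, rank * seq_per_rank:(rank + 1) * seq_per_rank, :].contiguous()

full = torch.randn(1, 2, 8, 4)
shards = [shard_sequence(full, r, world_size=4) for r in range(4)]
assert torch.equal(torch.cat(shards, dim=2), full)  # the shards tile the original sequence exactly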
+def test_gqa(Z, H, KVH, N_CTX, D_HEAD, causal, dtype=torch.bfloat16): + torch.manual_seed(177) + q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + k = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + v = torch.empty((Z, KVH, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_() + + rank = dist.get_rank() + world_size = dist.get_world_size() + seq_per_rank = N_CTX // world_size + + sm_scale = 0.5 + dout = torch.randn_like(q) + # torch reference implementation + M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda")) + ref_k = maybe_repeat_kv_fwd(q.shape[1], k).clone().detach().requires_grad_(True) + ref_v = maybe_repeat_kv_fwd(q.shape[1], v).clone().detach().requires_grad_(True) + #print(q.shape, ref_k.shape, k.shape) + p = torch.matmul(q, ref_k.transpose(2,3)) * sm_scale + assert causal + if causal: + p[:, :, M == 0] = float("-inf") + p = torch.softmax(p.float(), dim=-1).half() + ref_out = torch.matmul(p, ref_v) + ref_out.backward(dout) + ref_dv, v.grad = ref_v.grad.clone(), None + #print("Before reduce", ref_dv.shape) + ref_dv = (maybe_reduce_dkv(KVH, ref_dv.transpose(1,2))).transpose(1,2) + #print("After reduce", ref_dv.shape) + ref_dk, k.grad = ref_k.grad.clone(), None + ref_dk = (maybe_reduce_dkv(KVH, ref_dk.transpose(1,2))).transpose(1,2) + ref_dq, q.grad = q.grad.clone(), None + + # flash reference + from flash_attn import flash_attn_qkvpacked_func, flash_attn_func + flash_q = q.transpose(1,2).clone().detach().requires_grad_(True) + flash_k = k.transpose(1,2).clone().detach().requires_grad_(True) + flash_v = v.transpose(1,2).clone().detach().requires_grad_(True) + flash_ref_out = flash_attn_func(flash_q, flash_k, flash_v, 0, sm_scale, True) + flash_ref_out.backward(dout.transpose(1,2)) + flash_ref_out = flash_ref_out.transpose(1,2) + flash_ref_dv, v.grad = flash_v.grad.clone(), None + flash_ref_dv = flash_ref_dv.transpose(1,2) + flash_ref_dk, k.grad = flash_k.grad.clone(), None + flash_ref_dk = flash_ref_dk.transpose(1,2) + flash_ref_dq, q.grad = flash_q.grad.clone(), None + flash_ref_dq = flash_ref_dq.transpose(1,2) + + # triton implementation + + a, b, c, d = q.size() + real_q = q[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + real_k = k[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True) + real_v = v[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, KVH, -1, d).contiguous().clone().detach().requires_grad_(True) + real_do = dout[:,:, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].view(a, b, -1, d).contiguous().clone().detach().requires_grad_(True) + + tri_out = dist_attn_varlen(real_q, real_k, real_v, causal, sm_scale).half() + + # compare + assert torch.allclose(flash_ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward against flash" + print(f" *** rank {rank} passes forward") + tri_out.backward(real_do) + tri_dv, real_v.grad = real_v.grad.clone(), None + tri_dk, real_k.grad = real_k.grad.clone(), None + tri_dq, real_q.grad = real_q.grad.clone(), None + assert torch.allclose(flash_ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq against flash" + #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * 
seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape) + assert torch.allclose(flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk against flash {flash_ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk" + assert torch.allclose(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv against flash {flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(flash_ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv" + print(f"rank {rank} passes backward against flash") + + assert torch.allclose(ref_out[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_out, atol=1e-2, rtol=0), f" rank {rank} fails forward" + print(f" *** rank {rank} passes forward") + assert torch.allclose(ref_dq[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dq, atol=1e-2, rtol=0), f" rank {rank} fails backward dq" + #print(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :].shape, ref_dk.shape, tri_dk.shape) + assert torch.allclose(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dk, atol=1e-2, rtol=0), f"rank {rank} fails backward dk {ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dk} {torch.max(ref_dk[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dk)} rank {rank} fails backward dk" + assert torch.allclose(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :], tri_dv, atol=1e-2, rtol=0), f"rank {rank} fails backward dv {ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :]} {tri_dv} {torch.max(ref_dv[:, :, rank * seq_per_rank: (rank + 1) * seq_per_rank, :] - tri_dv)} rank {rank} fails backward dv" + print(f"rank {rank} passes backward") + +#BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64 +try: + from flash_attn.flash_attn_interface import \ + flash_attn_qkvpacked_func as flash_attn_func + FLASH_VER = 2 +except BaseException: + try: + from flash_attn.flash_attn_interface import flash_attn_func + FLASH_VER = 1 + except BaseException: + FLASH_VER = None +HAS_FLASH = FLASH_VER is not None +HAS_FLASH = None +ONLY_FLASH = False + +#BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64 +BATCH, N_HEADS, N_CTX, D_HEAD = 1, 32, None, 128 +# vary seq length for fixed head and batch=4 +configs = [triton.testing.Benchmark( + x_names=['N_CTX'], + x_vals=[2**i for i in range(18, 19)],#[ 20, 21]],#[10, 11, 12, 13, 14, 15, 16, 17, 18]], + line_arg='provider', + line_vals=['triton'] if not ONLY_FLASH else [] + (['flash'] if HAS_FLASH else []), + line_names=['Triton'] if not ONLY_FLASH else [] + ([f'Flash-{FLASH_VER}'] if HAS_FLASH else []), + styles=[('red', '-'), ('blue', '-')], + ylabel='ms', + plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}-{causal}', + args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.bfloat16, 'mode': mode, 'causal': causal} +) for mode in ["all"] for causal in [True]] + +# @triton.testing.perf_report(configs) +def bench_flash_attention(BATCH, H, KVH, N_CTX, D_HEAD, causal, mode, provider, args, dtype=torch.bfloat16, device="cuda"): + assert mode == "all" #mode in ['fwd', 'bwd'] + n_warmup = 10 + n_repeat = 10 + cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda') + seq_rank = 
get_sequence_parallel_rank() + seq_world_size = get_sequence_parallel_size() + if provider == "triton": + q = torch.randn((BATCH, H, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True) + k = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True) + v = torch.randn((BATCH, KVH, N_CTX // seq_world_size, D_HEAD), dtype=dtype, device="cuda", requires_grad=True) + if seq_rank == 0: + print(f"Benchmarking per GPU qkv shape: {q.shape}") + sm_scale = 1.3 + fwd_fn = lambda: dist_attn_varlen(q, k, v, causal, sm_scale) + if provider == "flash": + qkv = torch.randn((BATCH, N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True) + if FLASH_VER == 1: + lengths = torch.full((BATCH,), fill_value=N_CTX, device=device) + cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32) + cu_seqlens[1:] = lengths.cumsum(0) + qkv = qkv.reshape(BATCH * N_CTX, 3, H, D_HEAD) + fwd_fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal) + elif FLASH_VER == 2: + fwd_fn = lambda: flash_attn_func(qkv, causal=causal) + else: + raise ValueError(f'unknown {FLASH_VER = }') + + flops_per_matmul = 2. * BATCH * H * N_CTX * N_CTX * D_HEAD / seq_world_size + attn_flops = 2 * flops_per_matmul + + assert causal + if causal: + attn_flops *= 0.5 + fwd_flops = attn_flops + bwd_flops = attn_flops * 2.5 # 2.0(bwd) + 0.5(recompute) + + o = fwd_fn() + do = torch.randn_like(o) + bwd_fn = lambda: o.backward(do, retain_graph=True) + + def run_benchmark(fn): + time_list = [] + for _ in tqdm(range(n_warmup)): + cache.zero_() + fn() + torch.cuda.synchronize() + if args.debug: + print_and_reset_comm_stats() + for i in tqdm(range(n_repeat)): + cache.zero_() + torch.cuda.synchronize() + time_s = time.time() + fn() + torch.cuda.synchronize() + time_e = time.time() + time_list.append((time_e - time_s) * 1000.0) + if args.debug: + print_and_reset_comm_stats() + return np.asarray(time_list) + + fwd_time_arr = run_benchmark(fwd_fn) + bwd_time_arr = run_benchmark(bwd_fn) + + fwd_flops_ps = fwd_flops / np.mean(fwd_time_arr) * 1e-9 + print(f"(FWD) R={seq_rank} avg: {np.mean(fwd_time_arr)}, std: {np.std(fwd_time_arr)} flops: {fwd_flops_ps} \n") + + bwd_flops_ps = bwd_flops / np.mean(bwd_time_arr) * 1e-9 + print(f"(BWD) R={seq_rank} avg: {np.mean(bwd_time_arr)}, std: {np.std(bwd_time_arr)} flops: {bwd_flops_ps} \n") + + # total + total_time_arr = fwd_time_arr + bwd_time_arr + total_flops = fwd_flops + bwd_flops + total_flops_ps = total_flops / np.mean(total_time_arr) * 1e-9 + print(f"(Total) R={seq_rank} avg: {np.mean(total_time_arr)}, std: {np.std(total_time_arr)} flops: {total_flops_ps} \n") + + #return total_flops_ps + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--comm-mode", type=str, default="lightseq") + parser.add_argument("--debug", action="store_true") + parser.add_argument("--run-mode", type=str, default="test") + parser.add_argument("--bs", type=int, default=1) + parser.add_argument("--n_heads", type=int, default=32) + parser.add_argument("--n_kvheads", type=int, default=32) + parser.add_argument("--d_head", type=int, default=128) + parser.add_argument("--start_ctx", type=int, default=12) + parser.add_argument("--end_ctx", type=int, default=18) + parser.add_argument("--forward_engine", type=str, default="triton") + parser.add_argument("--backward_engine", type=str, default="flash") + + global args + args = parser.parse_args() + initialize_distributed() + + assert 
args.forward_engine == "triton", "Only triton forward is implemented." +    assert args.backward_engine in ["flash", "xformers"], "Only flash or xformers backward is implemented." + +    if args.backward_engine == "flash": +        from flash_attn.flash_attn_interface import _flash_attn_forward, _flash_attn_backward +    else: +        try: +            import xformers.ops +            from xformers.ops.fmha.common import Inputs, Context +            from xformers.ops.fmha import _memory_efficient_attention_backward +            from xformers.ops.fmha import cutlass, flash +        except ImportError: +            print("xformers not found! Please install it before trying to use it.") + +    if args.run_mode == "benchmark": +        for N_CTX in [2**i for i in range(args.start_ctx, args.end_ctx)]: +            bench_flash_attention(args.bs, args.n_heads, args.n_kvheads, N_CTX, args.d_head, True, "all", "triton", args)#.run(save_path='.', print_data=True) +            reset_global_memory_buffer() +    else: +        assert args.run_mode == "test" +        for N_CTX in [4096]: +            test_op(2, 16, N_CTX, 128, True) +            #test_gqa(1, 16, 8, N_CTX, 128, True) +            reset_global_memory_buffer() diff --git a/vision_niah_d/easy_context/dist_flash_attn/monkey_patch.py b/vision_niah_d/easy_context/dist_flash_attn/monkey_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..ab1bab9041e64a1b307597c38757d026b5addb30 --- /dev/null +++ b/vision_niah_d/easy_context/dist_flash_attn/monkey_patch.py @@ -0,0 +1,609 @@ +""" +Materialization-aware gradient checkpointing monkey patch. +""" +from typing import List, Optional, Tuple + +import torch +from torch import nn +from torch.utils.checkpoint import _get_autocast_kwargs, check_backward_validity, get_device_states, set_device_states, detach_variable + +import transformers +from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, BaseModelOutputWithPast + +from einops import rearrange + +from .lightseq_async_attn import _lightseq_forward, _lightseq_backward +from .async_communication import initialize_distributed, reset_global_memory_buffer + + +# define a global buffer to save flash attention outputs +# it's called global because it saves the outputs for all layers +global_flash_attn_out_buffer = None + +# define a local buffer to save the gradient of the residual +# it's called local because it's a temporary buffer which will be updated across layers +local_res_grad_buffer = None + +# hooks for the gradients of residual +global_hooks = [] + +def init_flash_attn_buffers(num_layers): +    # update the global buffer according to number of layers +    global global_flash_attn_out_buffer +    global_flash_attn_out_buffer = [None] * num_layers + +def clean_hook(): +    # Remove all hooks in the global buffer +    for hook in global_hooks: +        hook.remove() +    # Clear the global buffer +    global_hooks.clear() + +def clear_all_buffers_at_the_end_of_training(): +    # call it at the end of training +    global global_flash_attn_out_buffer +    global_flash_attn_out_buffer = None +    global local_res_grad_buffer +    local_res_grad_buffer = None +    clean_hook() + +def save_flash_attn_out_to_global_buffer(idx, out): +    global global_flash_attn_out_buffer +    global_flash_attn_out_buffer[idx] = out + +def get_flash_attn_out_from_global_buffer(idx): +    global global_flash_attn_out_buffer +    return global_flash_attn_out_buffer[idx] + +def free_flash_attn_out_buffer(idx): +    global global_flash_attn_out_buffer +    global_flash_attn_out_buffer[idx] = None + +def write_gradient_to_flash_attn_out(idx, grad): +    global global_flash_attn_out_buffer +    global_flash_attn_out_buffer[idx].grad = grad + +def save_res_grad_hook(grad): 
global local_res_grad_buffer + local_res_grad_buffer = grad + +def load_and_add_res_grad_hook(grad): + grad += get_res_grad_from_local_buffer() + +def get_res_grad_from_local_buffer(): + global local_res_grad_buffer + assert local_res_grad_buffer is not None + return local_res_grad_buffer + +class CheckpointFunctionEndWithFlashAttention(torch.autograd.Function): + """ Avoid doing twice flash attention forward during checkpointed backward. + args: + hidden_states, # i.e., flash attention output which is saved in global buffer. + attention_mask, + position_ids, + residual, # the gradient of residual is saved in local buffer to pass across ckpt layers. + """ + + @staticmethod + def forward(ctx, run_function, layer_idx, preserve_rng_state, *args): + check_backward_validity(args) + ctx.run_function = run_function + ctx.layer_idx = layer_idx + ctx.preserve_rng_state = preserve_rng_state + # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. + ctx.gpu_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs() + if preserve_rng_state: + ctx.fwd_cpu_state = torch.get_rng_state() + # Don't eagerly initialize the cuda context by accident. + # (If the user intends that the context is initialized later, within their + # run_function, we SHOULD actually stash the cuda state here. Unfortunately, + # we have no way to anticipate this will happen before we run the function.) + ctx.had_cuda_in_fwd = False + if torch.cuda._initialized: + ctx.had_cuda_in_fwd = True + ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args) + + # Save non-tensor inputs in ctx, keep a placeholder None for tensors + # to be filled out during the backward. + ctx.inputs = [] + ctx.tensor_indices = [] + tensor_inputs = [] + for i, arg in enumerate(args): + if i == 0 and ctx.layer_idx != 0: + # flash attention output is saved to the global buffer during forward + ctx.inputs.append(None) + else: + if torch.is_tensor(arg): + tensor_inputs.append(arg) + ctx.tensor_indices.append(i) + ctx.inputs.append(None) + else: + ctx.inputs.append(arg) + + with torch.no_grad(): + q, k, v, residual = run_function(*args) + softmax_scale = q.shape[-1] ** (-0.5) + + # lightseq version + _, _, _, out, softmax_lse = _lightseq_forward(q, k, v, True, softmax_scale, comm_mode='lightseq') + rng_state = None + + # save flash attention output to global buffer + save_flash_attn_out_to_global_buffer(ctx.layer_idx, out) + tensor_inputs += [softmax_lse] + ctx.softmax_scale = softmax_scale + + ctx.save_for_backward(*tensor_inputs) + + return out, residual + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError( + "Checkpointing is not compatible with .grad() or when an `inputs` parameter" + " is passed to .backward(). Please use .backward() and do not pass its `inputs`" + " argument.") + # Copy the list to avoid modifying original list. + inputs = list(ctx.inputs) + tensor_indices = ctx.tensor_indices + tensors = ctx.saved_tensors + tensors, softmax_lse = tensors[:-1], tensors[-1] + + # Fill in inputs with appropriate saved tensors. + # Fill the flash attention output first + if ctx.layer_idx > 0: + # inputs[0] should be flash attention output + inputs[0] = get_flash_attn_out_from_global_buffer(ctx.layer_idx-1) + for i, idx in enumerate(tensor_indices): + inputs[idx] = tensors[i] + + # Stash the surrounding rng state, and mimic the state that was + # present at this time during forward. Restore the surrounding state + # when we're done. 
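+        # fork_rng below temporarily restores the CPU (and, when CUDA was active in forward, per-device) RNG states captured in forward(), so the recomputed pre-attention ops see identical randomness; only the layernorm/QKV/RoPE part of the layer is re-run, while the attention output is read back from the global buffer and its gradients come from _lightseq_backward.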
+ rng_devices = [] + if ctx.preserve_rng_state and ctx.had_cuda_in_fwd: + rng_devices = ctx.fwd_gpu_devices + with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state): + if ctx.preserve_rng_state: + torch.set_rng_state(ctx.fwd_cpu_state) + if ctx.had_cuda_in_fwd: + set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states) + detached_inputs = detach_variable(tuple(inputs)) + with torch.enable_grad(), \ + torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs), \ + torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs): + # Stop recomputation before flash attention + # It is unecessary to run recomputation for flash attn + q, k, v, residual = ctx.run_function(*detached_inputs) + + # run backward() with only tensor that requires grad + # run flash attention backward first: + # get 'dout' from auto_grad inputs + # get 'out' from global buffer + # get 'qkv' from the recomputed tensors + #dq = torch.empty(q.shape, dtype=q.dtype, device=q.device) + #dk = torch.empty(k.shape, dtype=q.dtype, device=q.device) + #dv = torch.empty(v.shape, dtype=q.dtype, device=q.device) + out = get_flash_attn_out_from_global_buffer(ctx.layer_idx) + # todo get dout + dout = args[0] + + # lightseq version + dq, dk, dv = _lightseq_backward(dout, q, k, v, out, softmax_lse, ctx.softmax_scale, comm_mode='lightseq', backward_engine='flash') + #dqkv = torch.stack([dq, dk, dv]) + + # run backward for the part before flash attention + #qkv.backward(dqkv) + torch.autograd.backward([q, k, v], [dq, dk, dv]) + + grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None + for inp in detached_inputs) + + # write flash attention output gradients to buffer + if ctx.layer_idx > 0: + write_gradient_to_flash_attn_out(ctx.layer_idx-1, detached_inputs[0].grad) + + return (None, None, None) + grads + + +def checkpoint_end_with_flash_attention(function, layer_idx, *args, use_reentrant: bool = True, **kwargs): + # Hack to mix *args with **kwargs in a python 2.7-compliant way + preserve = kwargs.pop('preserve_rng_state', True) + if kwargs and use_reentrant: + raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)) + + return CheckpointFunctionEndWithFlashAttention.apply(function, layer_idx, preserve, *args) + + +class CheckpointFunctionLastModule(torch.autograd.Function): + """ + for the last ffn layer after flash attention, modifications include: + write the gradients wrt flash attention output and residual to the global buffer. + """ + + @staticmethod + def forward(ctx, run_function, preserve_rng_state, *args): + check_backward_validity(args) + ctx.run_function = run_function + ctx.preserve_rng_state = preserve_rng_state + # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. + ctx.gpu_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs() + if preserve_rng_state: + ctx.fwd_cpu_state = torch.get_rng_state() + # Don't eagerly initialize the cuda context by accident. + # (If the user intends that the context is initialized later, within their + # run_function, we SHOULD actually stash the cuda state here. Unfortunately, + # we have no way to anticipate this will happen before we run the function.) + ctx.had_cuda_in_fwd = False + if torch.cuda._initialized: + ctx.had_cuda_in_fwd = True + ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args) + + # Save non-tensor inputs in ctx, keep a placeholder None for tensors + # to be filled out during the backward. 
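+        # The first argument is the flash-attention output of the last decoder layer; it already lives in the global buffer, so only a None placeholder is stored here and backward() re-fetches it with get_flash_attn_out_from_global_buffer(-1), avoiding a second copy in this checkpoint's saved tensors.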
+ ctx.inputs = [] + ctx.tensor_indices = [] + tensor_inputs = [] + + assert torch.is_tensor(args[0]), "assuming the first tensor is the flash attention output" + for i, arg in enumerate(args): + if torch.is_tensor(arg) and i == 0: + # flash attn output has been saved to global buffer + ctx.inputs.append(None) + elif torch.is_tensor(arg): + tensor_inputs.append(arg) + ctx.tensor_indices.append(i) + ctx.inputs.append(None) + else: + ctx.inputs.append(arg) + + ctx.save_for_backward(*tensor_inputs) + + with torch.no_grad(): + outputs = run_function(*args) + return outputs + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError( + "Checkpointing is not compatible with .grad() or when an `inputs` parameter" + " is passed to .backward(). Please use .backward() and do not pass its `inputs`" + " argument.") + # Copy the list to avoid modifying original list. + inputs = list(ctx.inputs) + tensor_indices = ctx.tensor_indices + tensors = ctx.saved_tensors + + # Fill in inputs with appropriate saved tensors. + # Fill the flash attention output first + # inputs[0] should be flash attention output + inputs[0] = get_flash_attn_out_from_global_buffer(-1) + for i, idx in enumerate(tensor_indices): + inputs[idx] = tensors[i] + + # Stash the surrounding rng state, and mimic the state that was + # present at this time during forward. Restore the surrounding state + # when we're done. + rng_devices = [] + if ctx.preserve_rng_state and ctx.had_cuda_in_fwd: + rng_devices = ctx.fwd_gpu_devices + with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state): + if ctx.preserve_rng_state: + torch.set_rng_state(ctx.fwd_cpu_state) + if ctx.had_cuda_in_fwd: + set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states) + detached_inputs = detach_variable(tuple(inputs)) + with torch.enable_grad(), \ + torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs), \ + torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs): + outputs = ctx.run_function(*detached_inputs) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + + # run backward() with only tensor that requires grad + outputs_with_grad = [] + args_with_grad = [] + for i in range(len(outputs)): + if torch.is_tensor(outputs[i]) and outputs[i].requires_grad: + outputs_with_grad.append(outputs[i]) + args_with_grad.append(args[i]) + if len(outputs_with_grad) == 0: + raise RuntimeError( + "none of output has requires_grad=True," + " this checkpoint() is not necessary") + torch.autograd.backward(outputs_with_grad, args_with_grad) + grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None + for inp in detached_inputs) + + # write flash attention output gradients to buffer + write_gradient_to_flash_attn_out(-1, detached_inputs[0].grad) + + return (None, None) + grads + +def checkpoint_last_module(function, *args, use_reentrant: bool = True, **kwargs): + preserve = kwargs.pop('preserve_rng_state', True) + if kwargs and use_reentrant: + raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)) + + return CheckpointFunctionLastModule.apply(function, preserve, *args) + + +def llama_layer_forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + compute_attn_only: Optional[bool] = False, + compute_ffn_only: Optional[bool] = False, + residual: 
Optional[bool] = None, +) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + assert compute_ffn_only or compute_attn_only + + if compute_attn_only: + residual = hidden_states + + if residual.requires_grad: + # register a hook to add the gradient of residual + # from next checkpoint layer when doing recomputation + hook = residual.register_hook(load_and_add_res_grad_hook) + global_hooks.append(hook) + + hidden_states = self.input_layernorm(hidden_states) + + # Flash Attention + bsz, q_len, _ = hidden_states.size() + try: + query_states = self.self_attn.q_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2) + key_states = self.self_attn.k_proj(hidden_states).view(bsz, q_len, self.self_attn.num_key_value_heads, self.self_attn.head_dim).transpose(1, 2) + value_states = self.self_attn.v_proj(hidden_states).view(bsz, q_len, self.self_attn.num_key_value_heads, self.self_attn.head_dim).transpose(1, 2) + except: + # old transformers versions don't support num_key_value_heads + query_states = self.self_attn.q_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2) + key_states = self.self_attn.k_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2) + value_states = self.self_attn.v_proj(hidden_states).view(bsz, q_len, self.self_attn.num_heads, self.self_attn.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + assert past_key_value is None, "past_key_value is not supported" + + cos, sin = self.self_attn.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + # [bsz, nh, t, hd] + assert not output_attentions, "output_attentions is not supported" + assert not use_cache, "use_cache is not supported" + return query_states.contiguous(), key_states.contiguous(), value_states.contiguous(), residual + + elif compute_ffn_only: + hidden_states = self.self_attn.o_proj(rearrange(hidden_states, 'b h s d -> b s (h d)')) + # Need to add residual here to make sure checkpoint is right after attention + if residual.requires_grad: + # save the gradient of residual to the local buffer + # collect the hooks which should be removed after backward to avoid memory leak + hook = residual.register_hook(save_res_grad_hook) + global_hooks.append(hook) + + hidden_states = residual + hidden_states + + # Fully Connected + + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + else: + raise AttributeError + + return outputs + + +def forward( + self, + input_ids: torch.LongTensor = None, + 
attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + return_dict: Optional[bool] = None, +): + assert cache_position is None, "cache_position is not supported" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + # embed positions + attention_mask = None + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + try: + logger.warning_once( + "***** Using fast gradient checkpointing... *****" + ) + except: + pass + # initialize the global buffer + init_flash_attn_buffers(len(self.layers)) + + if use_cache: + try: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + except: + pass + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + # apply flash-attention friendly gradient checkpointing + if self.gradient_checkpointing and self.training: + for idx in range(len(self.layers) + 1): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + def forward_first_attn_module(module): + def custom_forward(*inputs): + hidden_states, attention_mask, position_ids, _ = inputs + # None for past_key_value + return module(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_attn_only=True) + return custom_forward + + def forward_ffn_attn_layer(module1, module2): + def custom_forward(*inputs): + hidden_states, attention_mask, position_ids, residual = inputs + # None for past_key_value + layer_outputs = module1(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_ffn_only=True, residual=residual) + hidden_states = layer_outputs[0] + return module2(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_attn_only=True) + return custom_forward + + def forward_last_ffn_module(module): + def custom_forward(*inputs): + hidden_states, attention_mask, position_ids, residual = inputs + # None for past_key_value + return module(hidden_states, attention_mask, position_ids, past_key_value, output_attentions, compute_ffn_only=True, residual=residual) + return custom_forward + + if idx == 0: + layer_outputs = checkpoint_end_with_flash_attention( + forward_first_attn_module(self.layers[0]), + idx, + hidden_states, + attention_mask, + position_ids, + None, + ) + hidden_states, residual = layer_outputs[0], layer_outputs[-1] + elif idx == len(self.layers): + layer_outputs = checkpoint_last_module( + forward_last_ffn_module(self.layers[-1]), + hidden_states, + attention_mask, + position_ids, + residual, + ) + hidden_states = layer_outputs[0] + else: + layer_outputs = checkpoint_end_with_flash_attention( + forward_ffn_attn_layer(self.layers[idx-1], self.layers[idx]), + idx, + hidden_states, + attention_mask, + position_ids, + residual, + ) + hidden_states, residual = layer_outputs[0], layer_outputs[-1] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + else: + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( 
+ last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +def apply_dist_flash_attn_monkey_patch_llama(): + initialize_distributed() + transformers.models.llama.modeling_llama.LlamaModel.forward = forward + transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward = llama_layer_forward diff --git a/vision_niah_d/easy_context/dist_flash_attn/prepare_input.py b/vision_niah_d/easy_context/dist_flash_attn/prepare_input.py new file mode 100644 index 0000000000000000000000000000000000000000..050d718ac3575471b3f98d503d3ab6c016adb2a7 --- /dev/null +++ b/vision_niah_d/easy_context/dist_flash_attn/prepare_input.py @@ -0,0 +1,36 @@ + + +def extract_local(value, rank, world_size, device, dim=1): + value_local = value.chunk(world_size, dim=dim)[rank] + return value_local.to(device) + + +def prepare_dist_flash_attn_inputs( + input_ids, position_ids, target_ids, rank, world_size, device +): + local_input_ids = extract_local( + input_ids, + rank, + world_size, + device, + ) + local_position_ids = extract_local( + position_ids, + rank, + world_size, + device, + ) + if target_ids is not None: + local_target_ids = extract_local( + target_ids, + rank, + world_size, + device, + ) + else: + local_target_ids = None + return { + "local_input_ids": local_input_ids, + "local_position_ids": local_position_ids, + "local_target_ids": local_target_ids, + } \ No newline at end of file diff --git a/vision_niah_d/easy_context/low_mem_cross_ent.py b/vision_niah_d/easy_context/low_mem_cross_ent.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef5abc54c7283b42bde8860ca033473b9915344 --- /dev/null +++ b/vision_niah_d/easy_context/low_mem_cross_ent.py @@ -0,0 +1,94 @@ +"""Low memory cross entropy without materilizing the logits + +This module enables long-context training of large vocab models, e.g., Gemma has 250K vocab and Llama 3 has 150K + +Yao Fu, University of Edinburgh +yao.fu@ed.ac.uk +""" + +import torch +import torch.nn.functional as F + + +def cross_ent_normal(x, weight, labels): + logits = torch.einsum("bsh, vh -> bsv", x, weight) + vocab = weight.size(0) + loss = F.cross_entropy(logits.view(-1, vocab), labels.view(-1)) + return loss + +class LowMemLogitProjCrossEnt(torch.autograd.Function): + """Low memory implementation of logits projection plus cross entropy loss. + Useful for reducing the peak memory when dealing with vocabulary larger than 100000 + + TODO: integrate this function into easy context + + Two tricks used here + 1. Shard the data to reduce peak memory + 2. 
Do not save the logits + """ + + @staticmethod + # @torch.compile() # Currently we do not use torch.compile because it uses additional memory + def forward(ctx, x: torch.Tensor, weight: torch.Tensor, labels: torch.Tensor, sp: int=4): + """ + Args: + x: size = [batch, seqlen, hidden] + weight: size = [vocab, hidden] + labels: size = [batch, seqlen] + """ + bsz, seqlen, hidden = x.size() + vocab = weight.size(0) + micro_seqlen = seqlen // sp + + loss = 0 + for i in range(sp): # shard data along the sequence dimension + logits_i_slice = torch.einsum("bsh, vh -> bsv", x[:, micro_seqlen * i: micro_seqlen * (i + 1)], weight) + loss_i = F.cross_entropy(logits_i_slice.view(-1, vocab), labels[:, micro_seqlen * i: micro_seqlen * (i + 1)].view(-1)) + loss = loss + loss_i + + loss = loss / sp + ctx.save_for_backward(x, weight, labels) # because we do no save logits, we save memory + ctx.sp = sp + return loss + + # @torch.compile() + @staticmethod + def backward(ctx, grad_output): + """Manually calculate the gradient in a memory-efficient way + Ref: https://indii.org/blog/gradients-of-softmax-and-logsumexp/ + """ + x, weight, labels = ctx.saved_tensors + sp = ctx.sp + device = x.device + dtype = x.dtype + bsz, seqlen, hidden = x.size() + vocab, hidden = weight.size() + micro_seqlen = seqlen // sp + + d_weight = torch.zeros_like(weight, device=weight.device) + d_x = [] + for i in range(sp): # shard data along sequence dimension, reduce peak memory + x_ = x[:, micro_seqlen * i: micro_seqlen * (i + 1)] + p = F.softmax( + torch.einsum("blh, vh -> blv", x_, weight), + dim=-1 + ) + + # memory efficient in-place backprop + # loss -> d_logits + d_logits = -p.view(-1) # [b * l * v] + labels_ = labels[:, micro_seqlen * i: micro_seqlen * (i + 1)].view(-1) # [b * l] + index = torch.arange(bsz * micro_seqlen, device=device) * vocab + labels_ + source = torch.tensor([1] * bsz * micro_seqlen, dtype=dtype, device=device) + d_logits.index_add_(0, index, source) + d_logits = -d_logits.view(bsz, micro_seqlen, vocab) / (bsz * seqlen) + + # d_logits -> d_x and d_weight + d_x.append(torch.einsum("blv, vh -> blh", d_logits, weight)) + d_weight += torch.einsum("blv, blh -> vh", d_logits, x_) + + d_weight = grad_output * d_weight + d_x = grad_output * torch.concat(d_x, 1) + return d_x, d_weight, None, None + +low_mem_cross_ent = LowMemLogitProjCrossEnt.apply \ No newline at end of file diff --git a/vision_niah_d/easy_context/low_mem_cross_ent_tests/test_correctness.py b/vision_niah_d/easy_context/low_mem_cross_ent_tests/test_correctness.py new file mode 100644 index 0000000000000000000000000000000000000000..5a7f10b5085f18ca917e4d1e8eb317c11cb72820 --- /dev/null +++ b/vision_niah_d/easy_context/low_mem_cross_ent_tests/test_correctness.py @@ -0,0 +1,81 @@ +"""Test the correctness (up to certain tolerance of numerical error) of low-memory cross-ent + +Yao Fu, University of Edinburgh +yao.fu@ed.ac.uk +""" + +import sys +sys.path.append("..") + +import torch +import torch.nn.functional as F +from low_mem_cross_ent import low_mem_cross_ent, cross_ent_normal + +bsz = 1 +seqlen = 50000 +hidden = 4096 +vocab = 15000 +dtype = torch.bfloat16 +rtol=1e-05 # relative tolerance when comparing the gradients from two implementations +atol=1e-07 # absolute tolerance when comparing the gradients from two implementations + # in Pytorch its default is 1e-8 but our implementation cannot pass this threshold + # 1e-7 seems to be the smallest rolerance we can pass + +x = torch.normal(mean=0, std=0.01, size=(bsz, seqlen, hidden), + device="cuda", 
dtype=dtype, requires_grad=True) +weight = torch.normal(mean=0, std=0.01, size=(vocab, hidden), + device="cuda", dtype=dtype, requires_grad=True) +labels = torch.randint(low=0, high=vocab - 1, size=(bsz, seqlen), device="cuda") + +loss_normal = cross_ent_normal(x, weight, labels) +print("loss normal: %.4f" % loss_normal.cpu().item()) +loss_normal.backward() +x_grad = x.grad.clone() +weight_grad = weight.grad.clone() +# print(x.grad) +# print(weight.grad) + + +# TODO: this one almost reduce memory to half. Maybe further increase sp +x.grad = None +weight.grad = None +loss_low_mem = low_mem_cross_ent(x, weight, labels) +print("loss low mem: %.4f" % loss_low_mem.cpu().item()) +loss_low_mem.backward() +# print(x.grad) +# print(weight.grad) + +## Test implementation by asserting close +assert(torch.allclose(x_grad, x.grad, rtol=rtol, atol=atol)) +assert(torch.allclose(weight_grad, weight.grad, rtol=rtol, atol=atol)) +print("PASS: gradients from normal computation and low memory computation are close.") + + +# #### Test gradient of logits +# x.grad = None +# weight.grad = None +# logits = torch.einsum("bsh, vh -> bsv", x, weight) +# loss = F.cross_entropy(logits.view(-1, vocab), labels.view(-1)) +# d_logits = torch.autograd.grad(loss, logits) +# p = F.softmax(torch.einsum("blh, vh -> blv", x, weight), dim=-1) +# p_ = p / (bsz * seqlen) + +# #### test index add +# x = torch.tensor([1, 2, 3, 4, 5, 6, 7]) +# index = torch.tensor([1, 3, 4]) +# source = torch.tensor([1, 1, 1]) +# x.index_add_(dim=0, index=index, source=source) + +# #### test index add 2 +# sp = 4 +# micro_seqlen = seqlen // sp +# p = torch.normal(mean=0, std=0.01, size=(bsz, micro_seqlen, vocab), +# device="cuda", dtype=torch.bfloat16) +# labels_ = labels[:, :micro_seqlen].view(-1) +# index = torch.arange(bsz * micro_seqlen, device="cuda") * vocab +# index += labels_ +# d_logits = -p.view(-1) +# source = torch.tensor([1] * bsz * micro_seqlen, dtype=torch.bfloat16, device="cuda") +# d_logits.index_add_(0, index, source) +# d_logits = d_logits.view(bsz, micro_seqlen, vocab) + diff --git a/vision_niah_d/easy_context/low_mem_cross_ent_tests/test_mem_and_speed.py b/vision_niah_d/easy_context/low_mem_cross_ent_tests/test_mem_and_speed.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf02838f728a6313e8b8124ea4e9f623ceba505 --- /dev/null +++ b/vision_niah_d/easy_context/low_mem_cross_ent_tests/test_mem_and_speed.py @@ -0,0 +1,80 @@ +"""Test the memory and speed and MFU of low memory cross entropy + +Yao Fu, University of Edinburgh +yao.fu@ed.ac.uk + +bf16, seqlen=50000, vocab=150000, without torch.compile +| | normal | low_mem | - | +| sp | - | 4 | 16 | +| peak mem | 43.4G | 18.5G | 8.1G | +| forward | 0.307 | 0.310 | 0.315 | +| backward | 0.631 | 0.896 | 0.914 | +| MFU | 0.57 | 0.45 | 0.44 | + +NOTE: tried torch.compile and it takes significantly larger memory, so do not use +TODO: profile and check why backward is slower +""" +import sys +sys.path.append("..") + +import torch +import numpy as np +import torch.nn.functional as F +from low_mem_cross_ent import low_mem_cross_ent, cross_ent_normal + +implementation = "low_mem" # "normal", "low_mem" +device_type = "A100" +bsz = 1 +seqlen = 50000 +hidden = 4096 +vocab = 150000 +sp=16 +dtype = torch.bfloat16 +# dtype = torch.float +G = 1024 ** 3 +T = 1024 ** 4 + +x = torch.normal(mean=0, std=0.01, size=(bsz, seqlen, hidden), + device="cuda", dtype=dtype, requires_grad=True) +weight = torch.normal(mean=0, std=0.01, size=(vocab, hidden), + device="cuda", dtype=dtype, 
requires_grad=True) +labels = torch.randint(low=0, high=vocab - 1, size=(bsz, seqlen), device="cuda") + +def timed(fn): + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + start.record() + result = fn() + end.record() + torch.cuda.synchronize() + return result, start.elapsed_time(end) / 1000 + +n_runs = 50 +flop = 6 * bsz * seqlen * hidden * vocab +if(implementation == "normal"): + forward_times, backward_times = [], [] + for _ in range(n_runs): + loss_normal, time_elapse = timed(lambda: cross_ent_normal(x, weight, labels)) + forward_times.append(time_elapse) + _, time_elapse = timed(lambda: loss_normal.backward()) + backward_times.append(time_elapse) + mem = torch.cuda.max_memory_allocated() +elif(implementation == "low_mem"): + forward_times, backward_times = [], [] + for _ in range(n_runs): + loss_low_mem, time_elapse = timed(lambda: low_mem_cross_ent(x, weight, labels, sp)) + forward_times.append(time_elapse) + _, time_elapse = timed(lambda: loss_low_mem.backward()) + backward_times.append(time_elapse) + mem = torch.cuda.max_memory_allocated() +else: raise NameError("Implementation %s not recognized" % implementation) + +forward_time = np.median(forward_times) +backward_time = np.median(backward_times) +flops = (flop / T) / (forward_time + backward_time) +if(device_type == "A100"): + device_flop = 312 +else: raise NameError("device %s not recognized" % device_type) + +print("%s, peak memory %.1fG, forward time %.4f, backward time %.4f, flops %.2fT, util %.2f" % + (implementation, mem / G, forward_time, backward_time, flops, flops / device_flop)) diff --git a/vision_niah_d/easy_context/modeling_qwen2.py b/vision_niah_d/easy_context/modeling_qwen2.py new file mode 100644 index 0000000000000000000000000000000000000000..5ca5becce2d6e21191b0588f39201e22ab5e5bc9 --- /dev/null +++ b/vision_niah_d/easy_context/modeling_qwen2.py @@ -0,0 +1,1397 @@ +# coding=utf-8 +# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
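+# Note: this file appears to be a modified copy of the upstream Hugging Face
+# Qwen2 modeling code. The main changes visible here include:
+#   * Qwen2FlashAttention2._flash_attention_forward dispatches to
+#     zigzag_ring_flash_attn_func and asserts that the attention mask is None,
+#     attention is causal, and sliding-window attention is disabled.
+#   * Qwen2ForCausalLM_RingAttn computes its training loss with
+#     low_mem_cross_ent (sequence-sharded, sp=16) instead of the usual
+#     CrossEntropyLoss over shifted logits.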
+"""PyTorch Qwen2 model.""" + +import inspect +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache +from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from transformers.models.qwen2.configuration_qwen2 import Qwen2Config +from .low_mem_cross_ent import low_mem_cross_ent + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) +from ring_flash_attn.zigzag_ring_flash_attn import zigzag_ring_flash_attn_func + + +logger = logging.get_logger(__name__) + + +_CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta" +_CONFIG_FOR_DOC = "Qwen2Config" + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2 +class Qwen2RMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Qwen2RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2 +class Qwen2RotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + # For BC we register cos and sin cached + self.max_seq_len_cached = max_position_embeddings + + @torch.no_grad() + def forward(self, x, position_ids): + # x: [bs, num_attention_heads, seq_len, head_size] + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force 
float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2 +class Qwen2MLP(nn.Module): + def __init__(self, config): + super().__init__() + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, hidden_state): + return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class Qwen2Attention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " + "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + self.attention_dropout = config.attention_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." + ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.rotary_emb = Qwen2RotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." 
+ ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Qwen2FlashAttention2(Qwen2Attention): + """ + Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention` + as the weights of the module stays untouched. The only required change would be on the forward pass + where it needs to correctly call the public API of flash attention and deal with padding tokens + in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom + config.max_window_layers layers. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
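+ # In this patched class, _flash_attention_forward (defined below) routes
+ # attention through zigzag_ring_flash_attn_func and asserts that the attention
+ # mask is None, attention is causal, and sliding-window attention is unused,
+ # so padded or non-causal inputs are not supported on this path.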
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ): + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + # Because the input can be padded, the absolute sequence length depends on the max position id. + rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1 + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + use_sliding_windows = ( + _flash_supports_window_size + and getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + and self.config.use_sliding_window + ) + + if not _flash_supports_window_size: + logger.warning_once( + "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" + " make sure to upgrade flash-attn library." 
+ ) + + if past_key_value is not None: + # Activate slicing cache only if the config has a value `sliding_windows` attribute + cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 + if ( + getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + and cache_has_contents + ): + slicing_tokens = 1 - self.config.sliding_window + + past_key = past_key_value[self.layer_idx][0] + past_value = past_key_value[self.layer_idx][1] + + past_key = past_key[:, :, slicing_tokens:, :].contiguous() + past_value = past_value[:, :, slicing_tokens:, :].contiguous() + + if past_key.shape[-2] != self.config.sliding_window - 1: + raise ValueError( + f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" + f" {past_key.shape}" + ) + + if attention_mask is not None: + attention_mask = attention_mask[:, slicing_tokens:] + attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) + + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + dropout_rate = 0.0 if not self.training else self.attention_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." 
+ ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reashape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + attn_output = self._flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + dropout=dropout_rate, + use_sliding_windows=use_sliding_windows, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + def _flash_attention_forward( + self, + query_states, + key_states, + value_states, + attention_mask, + query_length, + dropout=0.0, + softmax_scale=None, + use_sliding_windows=False, + ): + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + assert attention_mask is None + assert causal is True + assert use_sliding_windows is False + attn_output = zigzag_ring_flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale, + causal=causal, + ) + + return attn_output + + # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape + + # On the first iteration we need to properly re-create the padding mask + # by slicing it on the proper place + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] + + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + + key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2 +class Qwen2SdpaAttention(Qwen2Attention): + """ + Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. 
+ """ + + # Adapted from Qwen2Attention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. 
+ is_causal = True if self.is_causal and attention_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=attention_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +QWEN2_ATTENTION_CLASSES = { + "eager": Qwen2Attention, + "flash_attention_2": Qwen2FlashAttention2, + "sdpa": Qwen2SdpaAttention, +} + + +class Qwen2DecoderLayer(nn.Module): + def __init__(self, config: Qwen2Config, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + if config.use_sliding_window and config._attn_implementation != "flash_attention_2": + logger.warning_once( + f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " + "unexpected results may be encountered." + ) + self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + self.mlp = Qwen2MLP(config) + self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + assert isinstance( + self.self_attn, Qwen2FlashAttention2 + ) + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +QWEN2_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Qwen2Config`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.", + QWEN2_START_DOCSTRING, +) +class Qwen2PreTrainedModel(PreTrainedModel): + config_class = Qwen2Config + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["Qwen2DecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +QWEN2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. 
This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.", + QWEN2_START_DOCSTRING, +) +class Qwen2Model(Qwen2PreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`Qwen2DecoderLayer`] + + Args: + config: Qwen2Config + """ + + def __init__(self, config: Qwen2Config): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + past_key_values_length = 0 + + if use_cache: + use_legacy_cache = not isinstance(past_key_values, Cache) + if use_legacy_cache: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_key_values_length = past_key_values.get_usable_length(seq_length) + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: + is_padding_right = attention_mask[:, -1].sum().item() != batch_size + if is_padding_right: + raise ValueError( + "You are attempting to perform batched generation with padding_side='right'" + " this may lead to unexpected behaviour for Flash Attention version of Qwen2. 
Make sure to " + " call `tokenizer.padding_side = 'left'` before tokenizing the input. " + ) + + if self._attn_implementation == "flash_attention_2": + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + elif self._attn_implementation == "sdpa" and not output_attentions: + # output_attentions=True can not be supported when using SDPA, and we fall back on + # the manual implementation that requires a 4D causal mask in all cases. + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window=self.config.sliding_window, + ) + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window=self.config.sliding_window, + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class Qwen2ForCausalLM_RingAttn(Qwen2PreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = Qwen2Model(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = 
None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, Qwen2ForCausalLM + + >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + loss, logits = None, None + if labels is not None: + # Shift so that tokens < n predict n + # shift_labels = labels[:, 1:].to(hidden_states.device) + loss = low_mem_cross_ent(hidden_states, + self.lm_head.weight, + labels.to(hidden_states.device), + 16 + ) + logits = self.lm_head(hidden_states).float() + # loss = None + # if labels is not None: + # # Shift so that tokens < n predict n + # shift_logits = logits[..., :-1, :].contiguous() + # shift_labels = labels[..., 1:].contiguous() + # # Flatten the tokens + # loss_fct = CrossEntropyLoss() + # shift_logits = shift_logits.view(-1, self.config.vocab_size) + # shift_labels = shift_labels.view(-1) + # # Enable model parallelism + # shift_labels = shift_labels.to(shift_logits.device) + # loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + past_length = 0 + # Omit tokens covered by past_key_values + if 
past_key_values is not None: + # Past key values are always initialized with a `Cache` object -> no need for if-else anymore + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_length == 0: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The Qwen2 Model transformer with a sequence classification head on top (linear layer). + + [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
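A quick worked example may help with the `position_ids` construction in `prepare_inputs_for_generation` above: when no positions are supplied, the cumulative sum of a left-padded attention mask gives each real token its 0-based position, and padding slots are overwritten with a harmless placeholder (the values below are made up for illustration):

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])       # two left-pad slots
position_ids = attention_mask.long().cumsum(-1) - 1    # tensor([[-1, -1, 0, 1, 2]])
position_ids.masked_fill_(attention_mask == 0, 1)      # tensor([[ 1,  1, 0, 1, 2]])
print(position_ids)
```

When a cache is present, only the trailing `input_ids.shape[1]` positions are kept, matching the slice in the code above.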
+ """, + QWEN2_START_DOCSTRING, +) +class Qwen2ForSequenceClassification(Qwen2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Qwen2Model(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif 
self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states + output) e.g. for Named-Entity-Recognition (NER) tasks. + """, + QWEN2_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2 +class Qwen2ForTokenClassification(Qwen2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Qwen2Model(config) + if getattr(config, "classifier_dropout", None) is not None: + classifier_dropout = config.classifier_dropout + elif getattr(config, "hidden_dropout", None) is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.score = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
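The sequence-classification head earlier in this hunk pools the hidden state of the last non-padding token per row. A small worked example of the `sequence_lengths` arithmetic (pad id and token values are made up):

```python
import torch

pad_id = 0
input_ids = torch.tensor([[11, 12, 13, 0, 0],      # right-padded row
                          [21, 22, 23, 24, 25]])   # row without padding
seq_lens = torch.eq(input_ids, pad_id).int().argmax(-1) - 1
seq_lens = seq_lens % input_ids.shape[-1]
print(seq_lens)  # tensor([2, 4]) -> tokens 13 and 25 are pooled
```

The modulo maps the no-padding case from -1 to the final index, which the in-code comment notes is done for ONNX compatibility.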
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + sequence_output = self.dropout(sequence_output) + logits = self.score(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) \ No newline at end of file diff --git a/vision_niah_d/easy_context/ulysses_attn/__pycache__/monkey_patch.cpython-310.pyc b/vision_niah_d/easy_context/ulysses_attn/__pycache__/monkey_patch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36bb35fe6a729d09d6b1abee02506fadc4897993 Binary files /dev/null and b/vision_niah_d/easy_context/ulysses_attn/__pycache__/monkey_patch.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/ulysses_attn/__pycache__/prepare_inputs.cpython-310.pyc b/vision_niah_d/easy_context/ulysses_attn/__pycache__/prepare_inputs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c0646ae78b79115621b66b8b727fd6fe5966618 Binary files /dev/null and b/vision_niah_d/easy_context/ulysses_attn/__pycache__/prepare_inputs.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/ulysses_attn/monkey_patch.py b/vision_niah_d/easy_context/ulysses_attn/monkey_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..792ae946e715b27ac625738c5e14ce72bb2dcf4f --- /dev/null +++ b/vision_niah_d/easy_context/ulysses_attn/monkey_patch.py @@ -0,0 +1,110 @@ +import transformers +from typing import List, Optional, Tuple, Union +import warnings +import torch +import torch.utils.checkpoint +try: + from yunchang.ulysses import UlyssesAttention + ulysses_attn = UlyssesAttention() +except: + ulysses_attn = None + + +def new_flash_attn_forward( + self, + query_states, + key_states, + value_states, + attention_mask, + query_length, + dropout=0.0, + softmax_scale=None, + use_sliding_windows=False, +): + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + assert attention_mask is None + assert causal is True + assert use_sliding_windows is False + attn_output = ulysses_attn( + query_states, + key_states, + value_states, + dropout, + softmax_scale, + causal=causal, + ) + + return attn_output + + +def new_decoder_forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, +) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + assert isinstance( + self.self_attn, transformers.models.llama.modeling_llama.LlamaFlashAttention2 + ) or isinstance( + 
self.self_attn, + transformers.models.mistral.modeling_mistral.MistralFlashAttention2, + ), "Please toggle on the Flash Attention 2 implementation when using zigzag ring attention monkey patch." + + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +def apply_ulysses_attn_monkey_patch_llama(): + transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward = ( + new_flash_attn_forward + ) + transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward = ( + new_decoder_forward + ) + + diff --git a/vision_niah_d/easy_context/ulysses_attn/prepare_inputs.py b/vision_niah_d/easy_context/ulysses_attn/prepare_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..4a6ad7d11a3861a0022db5f783a5eb15235309f0 --- /dev/null +++ b/vision_niah_d/easy_context/ulysses_attn/prepare_inputs.py @@ -0,0 +1,45 @@ +import torch + + +def extract_local(value, rank, world_size, device, dim=1): + dimension_size = value.shape[dim] + sub_seq_length = dimension_size // world_size + + sub_seq_start = rank * sub_seq_length + sub_seq_end = (rank + 1) * sub_seq_length + local_value = value[:, sub_seq_start:sub_seq_end] + + return local_value.to(device) + + +def prepare_ulysses_attn_inputs( + input_ids, position_ids, target_ids, rank, world_size, device +): + + local_input_ids = extract_local( + input_ids, + rank, + world_size, + device, + ) + local_position_ids = extract_local( + position_ids, + rank, + world_size, + device, + ) + + if target_ids is not None: + local_target_ids = extract_local( + target_ids, + rank, + world_size, + device, + ) + else: + local_target_ids = None + return { + "local_input_ids": local_input_ids, + "local_position_ids": local_position_ids, + "local_target_ids": local_target_ids, + } diff --git a/vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/__pycache__/monkey_patch.cpython-310.pyc b/vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/__pycache__/monkey_patch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2850dbfc0c5783ba7f7d34fbdbf2f60699108cfb Binary files /dev/null and b/vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/__pycache__/monkey_patch.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/monkey_patch.py b/vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/monkey_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..fb509e0ef2a0103d74b887ae02954cfb84f8a9a8 --- /dev/null +++ 
b/vision_niah_d/easy_context/unsloth_offloaded_gradient_checkpoint/monkey_patch.py @@ -0,0 +1,94 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import transformers +import inspect + + +class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): + """ + Saves VRAM by smartly offloading to RAM. + Tiny hit to performance, since we mask the movement via non blocking calls. + """ + + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, forward_function, hidden_states, *args): + saved_hidden_states = hidden_states.to("cpu", non_blocking=True) + with torch.no_grad(): + output = forward_function(hidden_states, *args) + ctx.save_for_backward(saved_hidden_states) + ctx.forward_function = forward_function + ctx.args = args + + return output + + pass + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, dY): + (hidden_states,) = ctx.saved_tensors + hidden_states = hidden_states.to("cuda", non_blocking=True).detach() + hidden_states.requires_grad = True + with torch.enable_grad(): + (output,) = ctx.forward_function(hidden_states, *ctx.args) + torch.autograd.backward(output, dY) + return ( + None, + hidden_states.grad, + ) + ( + None, + ) * len(ctx.args) + + pass + + +pass + + +def new_gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): + assert gradient_checkpointing_kwargs == None + if not self.supports_gradient_checkpointing: + raise ValueError( + f"{self.__class__.__name__} does not support gradient checkpointing." + ) + + gradient_checkpointing_func = Unsloth_Offloaded_Gradient_Checkpointer.apply + # For old GC format (transformers < 4.35.0) for models that live on the Hub + # we will fall back to the overwritten `_set_gradient_checkpointing` method + _is_using_old_format = ( + "value" in inspect.signature(self._set_gradient_checkpointing).parameters + ) + + if not _is_using_old_format: + self._set_gradient_checkpointing( + enable=True, gradient_checkpointing_func=gradient_checkpointing_func + ) + else: + raise NotImplementedError() + + if getattr(self, "_hf_peft_config_loaded", False): + # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True + # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334 + # When training with PEFT, only LoRA layers will have requires grad set to True, but the output of frozen layers need to propagate + # the gradients to make sure the gradient flows. 
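A hypothetical usage sketch of the offloaded checkpointer defined in this file: once the monkey patch is applied, the stock Hugging Face call routes every checkpointed block through the CPU-offloading `autograd.Function` above. The model id is illustrative, and the import path assumes the scripts are run from `vision_niah_d`, as the eval scripts in this diff do.

```python
# Illustrative only -- not part of the patch.
import torch
from transformers import AutoModelForCausalLM
from easy_context.unsloth_offloaded_gradient_checkpoint.monkey_patch import (
    apply_unsloth_offloaded_gradient_checkpoint_monkey_patch,
)

apply_unsloth_offloaded_gradient_checkpoint_monkey_patch()
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B",              # model id is an assumption for the example
    torch_dtype=torch.bfloat16,
)
model.gradient_checkpointing_enable()  # now saves each layer's input on CPU
```

The forward pass copies each layer's input to pinned host memory with non-blocking transfers; the backward pass moves it back to the GPU and recomputes the layer under `torch.enable_grad()`, trading a small amount of recompute and transfer time for a large VRAM saving.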
+ self.enable_input_require_grads() + + +def apply_unsloth_offloaded_gradient_checkpoint_monkey_patch(): + transformers.modeling_utils.PreTrainedModel.gradient_checkpointing_enable = ( + new_gradient_checkpointing_enable + ) diff --git a/vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/monkey_patch.cpython-310.pyc b/vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/monkey_patch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acb52b84db7b77fee342b1622f8f1bb149864e87 Binary files /dev/null and b/vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/monkey_patch.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/prepare_inputs.cpython-310.pyc b/vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/prepare_inputs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b426368d74b0c9ec3b408a09852b4e1880d9860e Binary files /dev/null and b/vision_niah_d/easy_context/zigzag_ring_attn/__pycache__/prepare_inputs.cpython-310.pyc differ diff --git a/vision_niah_d/easy_context/zigzag_ring_attn/monkey_patch.py b/vision_niah_d/easy_context/zigzag_ring_attn/monkey_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..4ebcdb7d054f6d7b199ed4bbdc304cd6e73e7df0 --- /dev/null +++ b/vision_niah_d/easy_context/zigzag_ring_attn/monkey_patch.py @@ -0,0 +1,113 @@ +import transformers +from typing import List, Optional, Tuple, Union +import warnings +import torch +import torch.utils.checkpoint +from ring_flash_attn.zigzag_ring_flash_attn import zigzag_ring_flash_attn_func + + +def new_flash_attn_forward( + self, + query_states, + key_states, + value_states, + attention_mask, + query_length, + dropout=0.0, + softmax_scale=None, + use_sliding_windows=False, +): + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + assert attention_mask is None + assert causal is True + assert use_sliding_windows is False + attn_output = zigzag_ring_flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale, + causal=causal, + ) + + return attn_output + + +def new_decoder_forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, +) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + assert isinstance( + self.self_attn, transformers.models.llama.modeling_llama.LlamaFlashAttention2 + ) or isinstance( + self.self_attn, + transformers.models.mistral.modeling_mistral.MistralFlashAttention2, + ), "Please toggle on the Flash Attention 2 implementation when using zigzag ring attention monkey patch." + + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" + ) + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +def apply_zigzag_ring_attn_monkey_patch_llama(): + transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward = ( + new_flash_attn_forward + ) + transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward = ( + new_decoder_forward + ) + + +def apply_zigzag_ring_attn_monkey_patch_mistral(): + transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward = ( + new_flash_attn_forward + ) + transformers.models.mistral.modeling_mistral.MistralDecoderLayer.forward = ( + new_decoder_forward + ) diff --git a/vision_niah_d/easy_context/zigzag_ring_attn/prepare_inputs.py b/vision_niah_d/easy_context/zigzag_ring_attn/prepare_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f8f1c50a1757d419224003e75f0803ef9f64bf --- /dev/null +++ b/vision_niah_d/easy_context/zigzag_ring_attn/prepare_inputs.py @@ -0,0 +1,41 @@ +import torch + + +def extract_local(value, rank, world_size, device, dim=1): + value_chunks = value.chunk(2 * world_size, dim=dim) + local_value = torch.cat( + [value_chunks[rank], value_chunks[2 * world_size - rank - 1]], dim=dim + ) + return local_value.to(device) + + +def prepare_zigzag_ring_attn_inputs( + input_ids, position_ids, target_ids, rank, world_size, device +): + local_input_ids = extract_local( + input_ids, + rank, + world_size, + device, + ) + local_position_ids = extract_local( + position_ids, + rank, + world_size, + device, + dim=2 + ) + if target_ids is not None: + local_target_ids = extract_local( + target_ids, + rank, + world_size, + device, + ) + else: + local_target_ids = None + return { + "local_input_ids": local_input_ids, + "local_position_ids": local_position_ids, + "local_target_ids": local_target_ids, + } diff --git a/vision_niah_d/eval_debug.sh b/vision_niah_d/eval_debug.sh new file mode 100644 index 0000000000000000000000000000000000000000..752e9bb98a53cf9c900ea176524c81cbf447aeec --- /dev/null +++ b/vision_niah_d/eval_debug.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -x + +models=( + # "/mnt/petrelfs/weixilin/cache/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video" + # "/mnt/petrelfs/weixilin/cache/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video" + "/mnt/petrelfs/weixilin/cache/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video" + "/mnt/petrelfs/weixilin/cache/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video" + # "/mnt/petrelfs/weixilin/cache/Qwen2-VL-t_only-128frames-16card_8k-context-330k-llava-video" +) +rope_types=( + # "t_scale2_change_freq" + "vanilla_rope" + "time_rope" + # "t_only" + ) + +base_port=6015 + +for i in "${!models[@]}"; do + 
model=${models[$i]} + rope_type=${rope_types[$i]} + + port=$((base_port + i)) + + echo "evaluating model: $model" + echo "using rope_type: $rope_type" + echo "port: $port" + + accelerate launch --num_processes 8 --config_file vision_niah_d/easy_context/accelerate_configs/deepspeed_inference.yaml \ + --main_process_port "$port" vision_niah_d/eval_vision_niah.py \ + --model "$model" \ + --needle_dataset vision_niah_d/needle_datasets/dataset.json \ + --needle_embedding_dir vision_niah_d/video_needle_haystack/data/needle_qwen2_embeddings_144tokens_dataset \ + --haystack_dir vision_niah_d/video_needle_haystack/data/haystack_qwen2_embeddings_6000frames \ + --prompt_template qwen2 \ + --max_frame_num 3000 \ + --min_frame_num 100 \ + --frame_interval 200 \ + --output_path vision_niah_d/niah_output/ \ + --rope_type "$rope_type" \ + --image_tokens 144 \ + --depth_interval 0.2 + + echo "model $model evaluation has done." + echo "------------------------------------" +done diff --git a/vision_niah_d/eval_debug_interrupt.sh b/vision_niah_d/eval_debug_interrupt.sh new file mode 100644 index 0000000000000000000000000000000000000000..0636da275d25a8c7b3f42904377d200cd05343ae --- /dev/null +++ b/vision_niah_d/eval_debug_interrupt.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -x + +models=( + "/mnt/petrelfs/weixilin/cache/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video" +) +rope_types=( + # "m_rope" + "videorope" + ) + +# basic port +base_port=6011 + +# iterate each model +for i in "${!models[@]}"; do + model=${models[$i]} + rope_type=${rope_types[$i]} + + port=$((base_port + i)) + + echo "evaluating model: $model" + echo "using rope_type: $rope_type" + echo "port: $port" + + accelerate launch --num_processes 8 --config_file easy_context/accelerate_configs/deepspeed_inference.yaml \ + --main_process_port "$port" vision_niah_d/eval_vision_niah_interrupt.py \ + --model "$model" \ + --needle_dataset vision_niah_d/needle_datasets/dataset.json \ + --needle_embedding_dir vision_niah_d/video_needle_haystack/data/needle_qwen2_embeddings_144tokens_dataset \ + --needle_embedding_interrupt_dir vision_niah_d/video_needle_haystack/data/needle_qwen2_embeddings_144tokens_dataset_interrupt \ + --haystack_dir vision_niah_d/video_needle_haystack/data/haystack_qwen2_embeddings_6000frames \ + --prompt_template qwen2 \ + --max_frame_num 3000 \ + --min_frame_num 100 \ + --frame_interval 200 \ + --output_path vision_niah_d/niah_output_interrupt \ + --rope_type "$rope_type" \ + --image_tokens 144 \ + --depth_interval 0.2 + + echo "model $model evaluation has done." 
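Stepping back to the zigzag `extract_local` defined earlier in this diff (`easy_context/zigzag_ring_attn/prepare_inputs.py`): it cuts the sequence into `2 * world_size` chunks and hands rank `r` the pair `(r, 2*world_size - 1 - r)`, so every rank owns one early and one late chunk of the causal sequence and the attention workload stays balanced; the Ulysses variant earlier in the diff instead takes a single contiguous slice per rank. A toy illustration of the resulting ownership map:

```python
# Illustrative only: which token indices each rank keeps under the zigzag split.
import torch

def zigzag_owner_map(seq_len, world_size):
    chunks = torch.arange(seq_len).chunk(2 * world_size)
    return {r: torch.cat([chunks[r], chunks[2 * world_size - r - 1]])
            for r in range(world_size)}

print(zigzag_owner_map(seq_len=16, world_size=4))
# {0: tensor([ 0,  1, 14, 15]), 1: tensor([ 2,  3, 12, 13]),
#  2: tensor([ 4,  5, 10, 11]), 3: tensor([ 6,  7,  8,  9])}
```

Later in this diff, `eval_vision_niah.py` reverses this layout with `undo_extract_local` after gathering the per-rank predictions, before comparing them against the answer tokens.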
+ echo "------------------------------------" +done diff --git a/vision_niah_d/eval_vision_niah.py b/vision_niah_d/eval_vision_niah.py new file mode 100644 index 0000000000000000000000000000000000000000..a3a37ea6462dd59cd761a3cf376e5b83a1893e58 --- /dev/null +++ b/vision_niah_d/eval_vision_niah.py @@ -0,0 +1,552 @@ +import argparse +import gc +import sys +import torch +from transformers import AutoTokenizer +from transformers import LlamaForCausalLM +from easy_context import Qwen2ForCausalLM_RingAttn +from tqdm import tqdm +from accelerate import Accelerator +import glob +import numpy as np +from tqdm import tqdm +import gc +import matplotlib.pyplot as plt +import os +from matplotlib.colors import LinearSegmentedColormap +import seaborn as sns +import pandas as pd +from pathlib import Path +import random +import json +from datasets import load_dataset +from vision_niah.produce_needle_embedding import read_json_file +from easy_context import ( + prepare_seq_parallel_inputs, + apply_seq_parallel_monkey_patch, +) +from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor +from torchvision import io, transforms +from torchvision.transforms import InterpolationMode +apply_seq_parallel_monkey_patch("zigzag_ring_attn", "llama") + +import sys +import pdb +class ForkedPdb(pdb.Pdb): + """A Pdb subclass that may be used + from a forked multiprocessing child + """ + def interaction(self, *args, **kwargs): + _stdin = sys.stdin + try: + sys.stdin = open('/dev/stdin') + pdb.Pdb.interaction(self, *args, **kwargs) + finally: + sys.stdin = _stdin + +SEED = 24242424 +torch.manual_seed(SEED) +random.seed(SEED) +np.random.seed(SEED) +IMAGE_TOKENS = None +prompt_templates = { + "mistral": { + "preprompt": "[INST]", + "postprompt": " [/INST]" + }, + "vicuna": { + "preprompt": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER:", + "postprompt": "ASSISTANT:" + }, + "llama3": { + "preprompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n", + "postprompt": "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", + }, + "qwen2": { + "preprompt": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n", + "postprompt": "<|im_end|>\n<|im_start|>assistant\n", + }, + "yi": { + "preprompt": "<|im_start|>system\nAnswer the questions.<|im_end|>\n<|im_start|>user\n", + "postprompt": "<|im_end|>\n<|im_start|>assistant\n", + }, +} +# \nAnswer the question using a single word or phrase. 
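The helpers defined just below (`get_vanilla_rope_index`, `get_time_rope_index`, `get_t_scale2_rope_index`, `get_m_rope_index`) build the 3-axis position ids consumed by the different RoPE variants. As a rough illustration of how the two simplest schemes differ on a toy sequence — real runs use `IMAGE_TOKENS = 144` per frame and a 9x16 spatial grid; the numbers here are made up:

```python
# Toy illustration, not from the patch: 2 text tokens, 2 frames of 4 image
# tokens each, then 2 text tokens.
import torch

IMAGE_TOKENS, n_frames, pre_text, post_text = 4, 2, 2, 2
seq_len = pre_text + n_frames * IMAGE_TOKENS + post_text   # 12 tokens total

# vanilla_rope: plain sequential positions, identical on all three axes
vanilla = torch.arange(seq_len)

# time rope: every token of a frame shares that frame's temporal index
t_video = torch.arange(pre_text, pre_text + n_frames).repeat_interleave(IMAGE_TOKENS)
start = t_video.max().item() + 1
time_rope = torch.cat([torch.arange(pre_text), t_video,
                       torch.arange(start, start + post_text)])

print(vanilla.tolist())    # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
print(time_rope.tolist())  # [0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5]
```

The `videorope` setting (`get_t_scale2_rope_index`, dispatched with `scale_factor = 2.0`) additionally multiplies the per-frame temporal index by the scale factor and adds centred height/width offsets on the other two axes, while `m_rope` keeps the unscaled temporal index with uncentred spatial grids.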
+# The color of the bottle cap is +# answer = "Yellow" + + +def safe_tokenize(tokenizer, text): + tokenized = tokenizer.encode(text, return_tensors="pt") + if tokenizer.bos_token != None and len(tokenized) > 0 and tokenized[0, 0] == tokenizer.bos_token_id: + tokenized = tokenized[:, 1:] + return tokenized + +def get_vanilla_rope_index(input_embeds, video_se): + return torch.arange(input_embeds.shape[1]).view(1, 1, -1).expand(3, 1, -1) + +def get_time_rope_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## time rope + t_index = torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + nframes).repeat_interleave(IMAGE_TOKENS, dim=0).view(1, 1, -1).expand(3, 1, -1) + llm_pos_ids_list.append(t_index) + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + llm_pos_ids_list.append(torch.arange(t_index.max().item() + 1, text_len + t_index.max().item() + 1).view(1, 1, -1).expand(3, 1, -1)) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_t_scale2_rope_index(input_embeds, video_se, scale_factor): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2 + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2 + t_index = t_index * scale_factor + t_index = t_index + st_idx + h_index = h_index + t_index + w_index = w_index + t_index + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1)) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! 
{position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_rope_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + t_index = torch.arange(nframes).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() + llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx) + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_modify_margin_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2 + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2 + t_index = t_index + st_idx + h_index = h_index + t_index + w_index = w_index + t_index + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1)) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! 
{position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_modify_no_center_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_modify_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2 + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2 + t_index = t_index + st_idx + h_index = h_index + t_index + w_index = w_index + t_index + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1)) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! 
{position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_position_ids(input_embeds, rope_type, video_se): + if rope_type == 'vanilla_rope': + return get_vanilla_rope_index(input_embeds, video_se) + elif rope_type == 'tad_rope': + return get_time_rope_index(input_embeds, video_se) + get_vanilla_rope_index(input_embeds, video_se) + elif rope_type == 'm_rope': + return get_m_rope_index(input_embeds, video_se) + elif rope_type == 'videorope': + scale_factor = 2.0 + return get_t_scale2_rope_index(input_embeds, video_se, scale_factor) + else: + raise ValueError(f"not this rope: {rope_type}") + +# answer = "more bet" +def eval_forward(args, video_se, accelerator, model, input_embeds, answer_embeds, pad_id, answer_ids, tokenizer): + # first append answer_embeds to input_embeds + prompt_length = input_embeds.shape[1] + labels_length = answer_embeds.shape[1] + input_embeds = torch.cat([input_embeds, answer_embeds], dim=1) + # second pad input_embeds to the multiple of accelerator.num_processes + pad_tensor = torch.tensor( + [pad_id] + * ( + (accelerator.num_processes * 2) + - input_embeds.shape[1] % (accelerator.num_processes * 2) + ) + ).unsqueeze(0).unsqueeze(-1).expand(-1, -1, input_embeds.shape[-1]).to(accelerator.device) + input_embeds = torch.cat([input_embeds, pad_tensor], dim=1) + # position_ids = ( + # torch.arange(input_embeds.shape[1]).unsqueeze(0).expand(input_embeds.shape[0], -1) + # ).to(accelerator.device) + position_ids = get_position_ids(input_embeds, args.rope_type, video_se) + # ForkedPdb().set_trace() + accelerator.print(input_embeds.shape) + prepared = prepare_seq_parallel_inputs( + "zigzag_ring_attn", + input_embeds, + position_ids, + None, + accelerator.process_index, + accelerator.num_processes, + accelerator.device, + ) + local_input_embeds = prepared["local_input_ids"] + local_position_ids = prepared["local_position_ids"] + if 'm_modify' in args.rope_type or 't_only' in args.rope_type or 'change_freq' in args.rope_type: + from transformers.models.qwen2_vl import modeling_qwen2_vl + modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = modeling_qwen2_vl.apply_m_modify_multimodal_rotary_pos_emb + with torch.inference_mode(): + hidden_states = model.model( + inputs_embeds=local_input_embeds, + position_ids=local_position_ids, + use_cache=False, + )[0] + logits = model.lm_head(hidden_states) + logits = logits.float() + + pred = logits.argmax(dim=-1) + + # gather all logits using accelerator.gather + def undo_extract_local(gathered_value, world_size, dim=1): + value_chunks = gathered_value.chunk(2 * world_size, dim=dim) + reordered_chunks = [None] * (2 * world_size) + for i in range(world_size): + reordered_chunks[i] = value_chunks[i * 2] + reordered_chunks[2 * world_size - i - 1] = value_chunks[i * 2 + 1] + return torch.cat(reordered_chunks, dim=dim) + + correct = False + + gathered_logits = accelerator.gather(pred.squeeze(0)).unsqueeze(0) + # undo extract local on the gathered logits + # ForkedPdb().set_trace() + pred = undo_extract_local(gathered_logits, accelerator.num_processes) + pred = pred[:, prompt_length - 1 : prompt_length + labels_length - 1] + # check if the logits are correct, extract argmax id + # compare the predicted_ids with the labels + correct = (pred == answer_ids.to(accelerator.device)).all() + if accelerator.is_main_process: + print( + "Predicted: ", + tokenizer.decode(pred.squeeze().tolist()), + "Answer: ", + tokenizer.decode(answer_ids.squeeze().tolist()), + ) + # print id as well + print( + "Predicted: ", + 
pred.squeeze().tolist(), + "Answer: ", + answer_ids.squeeze().tolist(), + ) + return int(correct) + + +def load_haystack(args, accelerator): + haystack_embeddings = torch.load(f"{args.haystack_dir}/video_embeddings.pt").to(torch.bfloat16) + + return haystack_embeddings + +def load_text_embeddings(str, tokenizer, model, accelerator, replace_double_newline=False): + token_ids = safe_tokenize(tokenizer, str) + def replace_double_newline_func(token_ids): + # subsitute token id 271 to two 198] + # for example: + # from: tensor([[128000, 128006, 9125, 128007, 271, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]]) + # to: tensor([[128000, 128006, 9125, 128007, 198, 198, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]]) + # length will increase by number of 271 + double_newline_loc = (token_ids == 271).nonzero()[:, 1] + double_newline_loc += torch.arange(len(double_newline_loc)) + if len(double_newline_loc) > 0: + for loc in double_newline_loc: + token_ids = torch.cat([token_ids[:, :loc], torch.tensor([[198, 198]]), token_ids[:, loc+1:]], dim=1) + return token_ids + if replace_double_newline: + token_ids = replace_double_newline_func(token_ids) + token_ids = token_ids.to(accelerator.device) + with torch.inference_mode(): + embeddings = model.model.embed_tokens(token_ids) + return embeddings.to(torch.bfloat16) + +def inference(args): + accelerator = Accelerator( + mixed_precision="bf16", + ) + model_path = args.model + model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, + device_map=accelerator.device, + torch_dtype=torch.bfloat16, + attn_implementation="flash_attention_2" + ) + del model.visual + processor = AutoProcessor.from_pretrained("/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct") + tokenizer = processor.tokenizer + + + kwargs = {"rope_theta": args.rope_theta} if args.rope_theta is not None else {} + tokenizer.pad_token = tokenizer.eos_token + # remember to remove + accelerator.print("Preparing Haystack...") + haystack_embeddings = load_haystack(args, accelerator) + target_length = args.max_frame_num * IMAGE_TOKENS + # ForkedPdb().set_trace() + if len(haystack_embeddings) < target_length: + repeat_times = (target_length + len(haystack_embeddings) - 1) // len(haystack_embeddings) # 向上取整计算需要重复的次数 + haystack_embeddings = torch.cat([haystack_embeddings] * repeat_times, dim=0)[:target_length] + + assert len(haystack_embeddings) >= args.max_frame_num * IMAGE_TOKENS, "Haystack embeddings are not enough. Max frame {} is not found. 
Currently only {} frames.".format(args.max_frame_num, len(haystack_embeddings)) + # import pdb; pdb.set_trace() + + haystack_embeddings = haystack_embeddings[:args.max_frame_num * IMAGE_TOKENS].to(accelerator.device) + prompt = prompt_templates[args.prompt_template] + preprompt_embeddings = load_text_embeddings(prompt["preprompt"], tokenizer, model, accelerator, args.replace_double_newline) + postprompt_embeddings = load_text_embeddings(prompt["postprompt"], tokenizer, model, accelerator, args.replace_double_newline) + + needle_dataset = read_json_file(args.needle_dataset) + answer_embedding_list = [] + answer_id_list = [] + needle_embedding_list = [] + question_embeding_list = [] + for index, instance in enumerate(needle_dataset): + answer = instance["answer"] + question = instance["prompt"] + needle_embedding_list.append(torch.load(args.needle_embedding_dir + f"/{index}.pt", map_location="cpu").to(torch.bfloat16).to(accelerator.device)) + answer_embedding_list.append(load_text_embeddings(answer, tokenizer, model, accelerator)) + answer_id_list.append(safe_tokenize(tokenizer, answer)) + question_embeding_list.append(load_text_embeddings(question, tokenizer, model, accelerator)) + + accelerator.print("Starting Evaluation...") + model = accelerator.prepare(model) + model.gradient_checkpointing_enable() + all_accuries = [] + for num_frames in tqdm( + range( + args.min_frame_num, args.max_frame_num + 1, args.frame_interval + ) + ): + for depth in np.arange(0, 1 + args.depth_interval, args.depth_interval): + accuracies = [] + for question_embedding, needle_embedding, answer_embedding, answer_id in zip(question_embeding_list, needle_embedding_list, answer_embedding_list, answer_id_list): + query_frame_idx = int(depth * num_frames) + # import pdb; pdb.set_trace() + input_frames = torch.cat([haystack_embeddings[:query_frame_idx * IMAGE_TOKENS].to(accelerator.device),needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0) + input_emebds = torch.cat([preprompt_embeddings.to(accelerator.device), input_frames.to(accelerator.device),question_embedding.to(accelerator.device), postprompt_embeddings.to(accelerator.device)], dim=1) + video_se = (preprompt_embeddings.shape[1], preprompt_embeddings.shape[1] + input_frames.shape[1]) + correct = eval_forward( + args, video_se, accelerator, model, input_emebds, answer_embedding, tokenizer.pad_token_id, answer_id, tokenizer + ) + gc.collect() + torch.cuda.empty_cache() + if accelerator.is_main_process: + accuracies.append(correct) + if accelerator.is_main_process: + result = { + "Num. Frame": num_frames, + "Frame Depth": round(depth * 100, -1), + "Score": sum(accuracies) / len(accuracies), + } + accelerator.print(result) + all_accuries.append(result) + if accelerator.is_main_process: + model_name = args.model.split("/")[-1] + os.makedirs(f"{args.output_path}/{model_name}", exist_ok=True) + # save all_accuries as json + with open(f"{args.output_path}/{model_name}/all_accuracies.json", "w") as f: + json.dump(all_accuries, f, indent=4) + return all_accuries, accelerator + + +def plot(args, all_accuries): + df = pd.DataFrame(all_accuries) + cmap = LinearSegmentedColormap.from_list( + "custom_cmap", ["#F0496E", "#EBB839", "#9ad5b3"] + ) + + pivot_table = pd.pivot_table( + df, + values="Score", + index=["Frame Depth", "Num. 
Frame"], + aggfunc="mean", + ).reset_index() # This will aggregate + pivot_table = pivot_table.pivot( + index="Frame Depth", columns="Num. Frame", values="Score" + ) + # Create the heatmap with better aesthetics + plt.figure(figsize=(17.5, 8)) # Can adjust these dimensions as needed + ax = sns.heatmap( + pivot_table, + # annot=True, + fmt="g", + vmin=0, + vmax=1, + linecolor='white', + linewidths=1.5, + cmap=cmap, + cbar_kws={"label": "Score"}, + ) + + # Set the color bar label font size + cbar = ax.collections[0].colorbar + cbar.ax.yaxis.label.set_size(14) + cbar.ax.tick_params(labelsize=14) + + + # Define the formatter function + def thousands_formatter(x, pos): + if x >= 1000: + return f'{x/1000:.1f}K' + return f'{x}' + + context_lengths = pivot_table.columns + formatted_context_lengths = [thousands_formatter(x, None) for x in context_lengths] + + # More aesthetics + plt.xlabel("Num. of Frames", fontsize=14) # X-axis label + plt.ylabel("Depth Percent", fontsize=14) # Y-axis label + plt.xticks(ticks=[i + 0.5 for i in range(len(context_lengths))], labels=formatted_context_lengths, rotation=45, fontsize=14) + # plt.xticks(rotation=45, fontsize=14) # Rotates the x-axis labels to prevent overlap + plt.yticks(rotation=0, fontsize=14) # Ensures the y-axis labels are horizontal + plt.tight_layout() # Fits everything neatly into the figure area + # save + model_name = args.model.split("/")[-1] + + plt.savefig(f"{args.output_path}/{model_name}/heatmap.png") + # calculate average accuracy + average_accuracy = df["Score"].mean() + print(f"Average Accuracy: {average_accuracy}") + # save as txt + with open(f"{args.output_path}/{model_name}/avg_accuracy.txt", "w") as f: + f.write(f"Average Accuracy: {average_accuracy}\n") + +def main(args): + if args.plot_only: + # load all_accuracies from json + model_name = args.model.split("/")[-1] + with open(f"{args.output_path}/{model_name}/all_accuracies.json", "r") as f: + all_accuracies = json.load(f) + plot(args, all_accuracies) + else: + all_accuracies, accelerator = inference(args) + if accelerator.is_main_process: + plot(args, all_accuracies) + + +if __name__ == "__main__": + args = argparse.ArgumentParser() + args.add_argument("--model", type=str, default="/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct") + args.add_argument("--max_frame_num", type=int, default=1500) + args.add_argument("--needle_dataset", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/dataset.json") + args.add_argument("--min_frame_num", type=int, default=400) + args.add_argument("--frame_interval", type=int, default=100) + args.add_argument("--output_path", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/niah_output") + args.add_argument("--depth_interval", type=float, default=0.1) + args.add_argument("--num_samples", type=int, default=1) + args.add_argument("--rope_theta", type=float, default=None) + args.add_argument("--haystack_dir", type=str, default="your haystack_dir") + args.add_argument("--needle_embedding_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings") + args.add_argument("--prompt_template", type=str, default='qwen2') + args.add_argument("--image_tokens", type=int, default=144) + args.add_argument("--rope_type", type=str, default=None) + args.add_argument("--replace_double_newline", action="store_true") + args.add_argument("--plot_only", action="store_true") + args = args.parse_args() + IMAGE_TOKENS = 
args.image_tokens + main(args) diff --git a/vision_niah_d/eval_vision_niah_interrupt.py b/vision_niah_d/eval_vision_niah_interrupt.py new file mode 100644 index 0000000000000000000000000000000000000000..46c2ee74c3f97af29d55d6264c453285a52f84d5 --- /dev/null +++ b/vision_niah_d/eval_vision_niah_interrupt.py @@ -0,0 +1,572 @@ +import argparse +import gc +import sys +import torch +from transformers import AutoTokenizer +from transformers import LlamaForCausalLM +from easy_context import Qwen2ForCausalLM_RingAttn +from tqdm import tqdm +from accelerate import Accelerator +import glob +import numpy as np +from tqdm import tqdm +import gc +import matplotlib.pyplot as plt +import os +from matplotlib.colors import LinearSegmentedColormap +import seaborn as sns +import pandas as pd +from pathlib import Path +import random +import json +from datasets import load_dataset +from vision_niah.produce_needle_embedding import read_json_file +from easy_context import ( + prepare_seq_parallel_inputs, + apply_seq_parallel_monkey_patch, +) +from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor +from torchvision import io, transforms +from torchvision.transforms import InterpolationMode +apply_seq_parallel_monkey_patch("zigzag_ring_attn", "llama") + +import sys +import pdb +class ForkedPdb(pdb.Pdb): + """A Pdb subclass that may be used + from a forked multiprocessing child + """ + def interaction(self, *args, **kwargs): + _stdin = sys.stdin + try: + sys.stdin = open('/dev/stdin') + pdb.Pdb.interaction(self, *args, **kwargs) + finally: + sys.stdin = _stdin + +SEED = 24242424 +torch.manual_seed(SEED) +random.seed(SEED) +np.random.seed(SEED) +IMAGE_TOKENS = None +prompt_templates = { + "mistral": { + "preprompt": "[INST]", + "postprompt": " [/INST]" + }, + "vicuna": { + "preprompt": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER:", + "postprompt": "ASSISTANT:" + }, + "llama3": { + "preprompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n", + "postprompt": "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", + }, + "qwen2": { + "preprompt": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n", + "postprompt": "<|im_end|>\n<|im_start|>assistant\n", + }, + "yi": { + "preprompt": "<|im_start|>system\nAnswer the questions.<|im_end|>\n<|im_start|>user\n", + "postprompt": "<|im_end|>\n<|im_start|>assistant\n", + }, +} +# \nAnswer the question using a single word or phrase. 
+# The color of the bottle cap is +# answer = "Yellow" + + +def safe_tokenize(tokenizer, text): + tokenized = tokenizer.encode(text, return_tensors="pt") + if tokenizer.bos_token != None and len(tokenized) > 0 and tokenized[0, 0] == tokenizer.bos_token_id: + tokenized = tokenized[:, 1:] + return tokenized + +def get_vanilla_rope_index(input_embeds, video_se): + return torch.arange(input_embeds.shape[1]).view(1, 1, -1).expand(3, 1, -1) + +def get_time_rope_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## time rope + t_index = torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + nframes).repeat_interleave(IMAGE_TOKENS, dim=0).view(1, 1, -1).expand(3, 1, -1) + llm_pos_ids_list.append(t_index) + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + llm_pos_ids_list.append(torch.arange(t_index.max().item() + 1, text_len + t_index.max().item() + 1).view(1, 1, -1).expand(3, 1, -1)) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_t_scale2_rope_index(input_embeds, video_se, scale_factor): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2 + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2 + t_index = t_index * scale_factor + t_index = t_index + st_idx + h_index = h_index + t_index + w_index = w_index + t_index + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1)) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! 
{position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_rope_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + t_index = torch.arange(nframes).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() + llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx) + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_modify_margin_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2 + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2 + t_index = t_index + st_idx + h_index = h_index + t_index + w_index = w_index + t_index + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1)) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1].max().item() + 1, llm_pos_ids_list[-1].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! 
{position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_modify_no_center_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1) + st_idx) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! {position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_m_modify_index(input_embeds, video_se): + llm_pos_ids_list = [] + llm_pos_ids_list.append(torch.arange(video_se[0]).view(1, 1, -1).expand(3, 1, -1)) + st_idx = llm_pos_ids_list[-1][0].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + assert (video_se[1] - video_se[0]) % IMAGE_TOKENS == 0, 'frames should not be float' + nframes = (video_se[1] - video_se[0]) // IMAGE_TOKENS + ## m_rope rope + llm_grid_t, llm_grid_h, llm_grid_w = nframes, 9, 16 + + t_index = torch.arange(llm_grid_t).view(-1, 1).expand( + -1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand( + llm_grid_t, -1, llm_grid_w).flatten() - (llm_grid_h-1) // 2 + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand( + llm_grid_t, llm_grid_h, -1).flatten() - (llm_grid_w-1) // 2 + t_index = t_index + st_idx + h_index = h_index + t_index + w_index = w_index + t_index + + llm_pos_ids_list.append( + torch.stack([t_index, h_index, w_index]).unsqueeze(dim=1)) + + if input_embeds.shape[1] > video_se[1]: + text_len = input_embeds.shape[1] - video_se[1] + # print(text_len) + + llm_pos_ids_list.append(torch.arange(llm_pos_ids_list[-1][0].max().item() + 1, llm_pos_ids_list[-1][0].max().item() + 1 + text_len).view(1, 1, -1).expand(3, 1, -1)) + # print(llm_pos_ids_list[0].shape, llm_pos_ids_list[1].shape, llm_pos_ids_list[2].shape) + position_ids = torch.cat(llm_pos_ids_list, dim=-1) + assert position_ids.shape[-1] == input_embeds.shape[1], f'shape mismatch! 
{position_ids.shape[-1]=}, {input_embeds.shape[1]=}' + return position_ids + +def get_position_ids(input_embeds, rope_type, video_se): + if rope_type == 'vanilla_rope': + return get_vanilla_rope_index(input_embeds, video_se) + elif rope_type == 'tad_rope': + return get_time_rope_index(input_embeds, video_se) + get_vanilla_rope_index(input_embeds, video_se) + elif rope_type == 'm_rope': + return get_m_rope_index(input_embeds, video_se) + elif rope_type == 'videorope': + scale_factor = 2.0 + return get_t_scale2_rope_index(input_embeds, video_se, scale_factor) + else: + raise ValueError(f"not this rope: {rope_type}") + +# answer = "more bet" +def eval_forward(args, video_se, accelerator, model, input_embeds, answer_embeds, pad_id, answer_ids, tokenizer): + # first append answer_embeds to input_embeds + prompt_length = input_embeds.shape[1] + labels_length = answer_embeds.shape[1] + input_embeds = torch.cat([input_embeds, answer_embeds], dim=1) + # second pad input_embeds to the multiple of accelerator.num_processes + pad_tensor = torch.tensor( + [pad_id] + * ( + (accelerator.num_processes * 2) + - input_embeds.shape[1] % (accelerator.num_processes * 2) + ) + ).unsqueeze(0).unsqueeze(-1).expand(-1, -1, input_embeds.shape[-1]).to(accelerator.device) + input_embeds = torch.cat([input_embeds, pad_tensor], dim=1) + # position_ids = ( + # torch.arange(input_embeds.shape[1]).unsqueeze(0).expand(input_embeds.shape[0], -1) + # ).to(accelerator.device) + position_ids = get_position_ids(input_embeds, args.rope_type, video_se) + # ForkedPdb().set_trace() + accelerator.print(input_embeds.shape) + prepared = prepare_seq_parallel_inputs( + "zigzag_ring_attn", + input_embeds, + position_ids, + None, + accelerator.process_index, + accelerator.num_processes, + accelerator.device, + ) + local_input_embeds = prepared["local_input_ids"] + local_position_ids = prepared["local_position_ids"] + if 'm_modify' in args.rope_type or 't_only' in args.rope_type or 'change_freq' in args.rope_type: + from transformers.models.qwen2_vl import modeling_qwen2_vl + modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = modeling_qwen2_vl.apply_m_modify_multimodal_rotary_pos_emb + with torch.inference_mode(): + hidden_states = model.model( + inputs_embeds=local_input_embeds, + position_ids=local_position_ids, + use_cache=False, + )[0] + logits = model.lm_head(hidden_states) + logits = logits.float() + + pred = logits.argmax(dim=-1) + + # gather all logits using accelerator.gather + def undo_extract_local(gathered_value, world_size, dim=1): + value_chunks = gathered_value.chunk(2 * world_size, dim=dim) + reordered_chunks = [None] * (2 * world_size) + for i in range(world_size): + reordered_chunks[i] = value_chunks[i * 2] + reordered_chunks[2 * world_size - i - 1] = value_chunks[i * 2 + 1] + return torch.cat(reordered_chunks, dim=dim) + + correct = False + + gathered_logits = accelerator.gather(pred.squeeze(0)).unsqueeze(0) + # undo extract local on the gathered logits + # ForkedPdb().set_trace() + pred = undo_extract_local(gathered_logits, accelerator.num_processes) + pred = pred[:, prompt_length - 1 : prompt_length + labels_length - 1] + # check if the logits are correct, extract argmax id + # compare the predicted_ids with the labels + correct = (pred == answer_ids.to(accelerator.device)).all() + if accelerator.is_main_process: + print( + "Predicted: ", + tokenizer.decode(pred.squeeze().tolist()), + "Answer: ", + tokenizer.decode(answer_ids.squeeze().tolist()), + ) + # print id as well + print( + "Predicted: ", + 
pred.squeeze().tolist(), + "Answer: ", + answer_ids.squeeze().tolist(), + ) + return int(correct) + + +def load_haystack(args, accelerator): + haystack_embeddings = torch.load(f"{args.haystack_dir}/video_embeddings.pt").to(torch.bfloat16) + + return haystack_embeddings + +def load_text_embeddings(str, tokenizer, model, accelerator, replace_double_newline=False): + token_ids = safe_tokenize(tokenizer, str) + def replace_double_newline_func(token_ids): + # substitute token id 271 with two 198 tokens + # for example: + # from: tensor([[128000, 128006, 9125, 128007, 271, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]]) + # to: tensor([[128000, 128006, 9125, 128007, 198, 198, 2675, 527, 264, 11190, 4221, 323, 11376, 18328, 13]]) + # length will increase by number of 271 + double_newline_loc = (token_ids == 271).nonzero()[:, 1] + double_newline_loc += torch.arange(len(double_newline_loc)) + if len(double_newline_loc) > 0: + for loc in double_newline_loc: + token_ids = torch.cat([token_ids[:, :loc], torch.tensor([[198, 198]]), token_ids[:, loc+1:]], dim=1) + return token_ids + if replace_double_newline: + token_ids = replace_double_newline_func(token_ids) + token_ids = token_ids.to(accelerator.device) + with torch.inference_mode(): + embeddings = model.model.embed_tokens(token_ids) + return embeddings.to(torch.bfloat16) + +def inference(args): + accelerator = Accelerator( + mixed_precision="bf16", + ) + model_path = args.model + model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, + device_map=accelerator.device, + torch_dtype=torch.bfloat16, + attn_implementation="flash_attention_2" + ) + del model.visual + processor = AutoProcessor.from_pretrained("/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct") + tokenizer = processor.tokenizer + + + kwargs = {"rope_theta": args.rope_theta} if args.rope_theta is not None else {} + tokenizer.pad_token = tokenizer.eos_token + # remember to remove + accelerator.print("Preparing Haystack...") + haystack_embeddings = load_haystack(args, accelerator) + target_length = args.max_frame_num * IMAGE_TOKENS + # ForkedPdb().set_trace() + if len(haystack_embeddings) < target_length: + repeat_times = (target_length + len(haystack_embeddings) - 1) // len(haystack_embeddings) # ceil division: number of times the haystack must be repeated to reach target_length + haystack_embeddings = torch.cat([haystack_embeddings] * repeat_times, dim=0)[:target_length] + + assert len(haystack_embeddings) >= args.max_frame_num * IMAGE_TOKENS, "Haystack embeddings are not enough. Max frame {} is not found.
Currently only {} frames.".format(args.max_frame_num, len(haystack_embeddings)) + # import pdb; pdb.set_trace() + + haystack_embeddings = haystack_embeddings[:args.max_frame_num * IMAGE_TOKENS].to(accelerator.device) + prompt = prompt_templates[args.prompt_template] + preprompt_embeddings = load_text_embeddings(prompt["preprompt"], tokenizer, model, accelerator, args.replace_double_newline) + postprompt_embeddings = load_text_embeddings(prompt["postprompt"], tokenizer, model, accelerator, args.replace_double_newline) + + needle_dataset = read_json_file(args.needle_dataset) + answer_embedding_list = [] + answer_id_list = [] + needle_embedding_list = [] + needle_embedding_interrupt_list = [] + question_embedding_list = [] + for index, instance in enumerate(needle_dataset): + answer = instance["answer"] + question = instance["prompt"] + needle_embedding_list.append(torch.load(args.needle_embedding_dir + f"/{index}.pt", map_location="cpu").to(torch.bfloat16).to(accelerator.device)) + needle_embedding_interrupt_list.append(torch.load(args.needle_embedding_interrupt_dir + f"/{index}.pt", map_location="cpu").to(torch.bfloat16).to(accelerator.device)) + answer_embedding_list.append(load_text_embeddings(answer, tokenizer, model, accelerator)) + answer_id_list.append(safe_tokenize(tokenizer, answer)) + question_embedding_list.append(load_text_embeddings(question, tokenizer, model, accelerator)) + + accelerator.print("Starting Evaluation...") + model = accelerator.prepare(model) + model.gradient_checkpointing_enable() + all_accuracies = [] + for num_frames in tqdm( + range( + args.min_frame_num, args.max_frame_num + 1, args.frame_interval + ) + ): + for depth in np.arange(0, 1 + args.depth_interval, args.depth_interval): + accuracies = [] + for question_embedding, needle_embedding, needle_embedding_interrupt, answer_embedding, answer_id in zip(question_embedding_list, needle_embedding_list, needle_embedding_interrupt_list, answer_embedding_list, answer_id_list): + query_frame_idx = int(depth * num_frames) + # import pdb; pdb.set_trace() + #!
interrupt every 200 frames + import random + test_p = random.random() + mode = 'no' + cycle = 200 + if query_frame_idx - cycle <= 0 and query_frame_idx + cycle >= num_frames: mode = 'no' + elif query_frame_idx < cycle: mode = 'after' + elif query_frame_idx + cycle >= num_frames: mode = 'before' + elif test_p < 0.5: mode = 'before' + else: mode = 'after' + print(f"{mode=}") + if mode == 'before': + input_frames = torch.cat([haystack_embeddings[:(query_frame_idx-cycle) * IMAGE_TOKENS].to(accelerator.device), needle_embedding_interrupt.to(accelerator.device), haystack_embeddings[(query_frame_idx-cycle) * IMAGE_TOKENS:query_frame_idx * IMAGE_TOKENS].to(accelerator.device), needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0) + elif mode == 'after': + input_frames = torch.cat([haystack_embeddings[:query_frame_idx * IMAGE_TOKENS].to(accelerator.device),needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:(query_frame_idx+cycle)*IMAGE_TOKENS].to(accelerator.device), needle_embedding_interrupt.to(accelerator.device), haystack_embeddings[(query_frame_idx+cycle)*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0) + else: + input_frames = torch.cat([haystack_embeddings[:query_frame_idx * IMAGE_TOKENS].to(accelerator.device),needle_embedding.to(accelerator.device), haystack_embeddings[query_frame_idx*IMAGE_TOKENS:num_frames*IMAGE_TOKENS].to(accelerator.device)], dim=0).view(-1, haystack_embeddings.shape[-1]).unsqueeze(0) + input_embeds = torch.cat([preprompt_embeddings.to(accelerator.device), input_frames.to(accelerator.device),question_embedding.to(accelerator.device), postprompt_embeddings.to(accelerator.device)], dim=1) + video_se = (preprompt_embeddings.shape[1], preprompt_embeddings.shape[1] + input_frames.shape[1]) + # ForkedPdb().set_trace() + correct = eval_forward( + args, video_se, accelerator, model, input_embeds, answer_embedding, tokenizer.pad_token_id, answer_id, tokenizer + ) + gc.collect() + torch.cuda.empty_cache() + if accelerator.is_main_process: + accuracies.append(correct) + if accelerator.is_main_process: + result = { + "Num. Frame": num_frames, + "Frame Depth": round(depth * 100, -1), + "Score": sum(accuracies) / len(accuracies), + } + accelerator.print(result) + all_accuracies.append(result) + if accelerator.is_main_process: + model_name = args.model.split("/")[-1] + os.makedirs(f"{args.output_path}/{model_name}", exist_ok=True) + # save all_accuracies as json + with open(f"{args.output_path}/{model_name}/all_accuracies.json", "w") as f: + json.dump(all_accuracies, f, indent=4) + return all_accuracies, accelerator + + +def plot(args, all_accuracies): + df = pd.DataFrame(all_accuracies) + cmap = LinearSegmentedColormap.from_list( + "custom_cmap", ["#F0496E", "#EBB839", "#9ad5b3"] + ) + + pivot_table = pd.pivot_table( + df, + values="Score", + index=["Frame Depth", "Num. Frame"], + aggfunc="mean", + ).reset_index() # This will aggregate + pivot_table = pivot_table.pivot( + index="Frame Depth", columns="Num.
Frame", values="Score" + ) + # Create the heatmap with better aesthetics + plt.figure(figsize=(17.5, 8)) # Can adjust these dimensions as needed + ax = sns.heatmap( + pivot_table, + # annot=True, + fmt="g", + vmin=0, + vmax=1, + linecolor='white', + linewidths=1.5, + cmap=cmap, + cbar_kws={"label": "Score"}, + ) + + # Set the color bar label font size + cbar = ax.collections[0].colorbar + cbar.ax.yaxis.label.set_size(14) + cbar.ax.tick_params(labelsize=14) + + + # Define the formatter function + def thousands_formatter(x, pos): + if x >= 1000: + return f'{x/1000:.1f}K' + return f'{x}' + + context_lengths = pivot_table.columns + formatted_context_lengths = [thousands_formatter(x, None) for x in context_lengths] + + # More aesthetics + plt.xlabel("Num. of Frames", fontsize=14) # X-axis label + plt.ylabel("Depth Percent", fontsize=14) # Y-axis label + plt.xticks(ticks=[i + 0.5 for i in range(len(context_lengths))], labels=formatted_context_lengths, rotation=45, fontsize=14) + # plt.xticks(rotation=45, fontsize=14) # Rotates the x-axis labels to prevent overlap + plt.yticks(rotation=0, fontsize=14) # Ensures the y-axis labels are horizontal + plt.tight_layout() # Fits everything neatly into the figure area + # save + model_name = args.model.split("/")[-1] + + plt.savefig(f"{args.output_path}/{model_name}/heatmap.png") + # calculate average accuracy + average_accuracy = df["Score"].mean() + print(f"Average Accuracy: {average_accuracy}") + # save as txt + with open(f"{args.output_path}/{model_name}/avg_accuracy.txt", "w") as f: + f.write(f"Average Accuracy: {average_accuracy}\n") + +def main(args): + if args.plot_only: + # load all_accuracies from json + model_name = args.model.split("/")[-1] + with open(f"{args.output_path}/{model_name}/all_accuracies.json", "r") as f: + all_accuracies = json.load(f) + plot(args, all_accuracies) + else: + all_accuracies, accelerator = inference(args) + if accelerator.is_main_process: + plot(args, all_accuracies) + + +if __name__ == "__main__": + args = argparse.ArgumentParser() + args.add_argument("--model", type=str, default="/mnt/hwfile/mllm/weixilin/cache/Qwen2-VL-7B-Instruct") + args.add_argument("--max_frame_num", type=int, default=1500) + args.add_argument("--needle_dataset", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/dataset_change_format.json") + args.add_argument("--min_frame_num", type=int, default=400) + args.add_argument("--frame_interval", type=int, default=100) + args.add_argument("--output_path", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/niah_output") + args.add_argument("--depth_interval", type=float, default=0.1) + args.add_argument("--num_samples", type=int, default=1) + args.add_argument("--rope_theta", type=float, default=None) + args.add_argument("--haystack_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/haystack_vicuna_embeddings_concat3000frames_144tokens_has_background") + args.add_argument("--needle_embedding_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings") + args.add_argument("--needle_embedding_interrupt_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings") + args.add_argument("--prompt_template", type=str, default='qwen2') + args.add_argument("--image_tokens", type=int, default=144) + args.add_argument("--rope_type", 
type=str, default=None) + args.add_argument("--replace_double_newline", action="store_true") + args.add_argument("--plot_only", action="store_true") + args = args.parse_args() + IMAGE_TOKENS = args.image_tokens + main(args) diff --git a/vision_niah_d/needle_datasets/dataset.json b/vision_niah_d/needle_datasets/dataset.json new file mode 100644 index 0000000000000000000000000000000000000000..1b40c196c515a3628d374673bb72211e69cfda1d --- /dev/null +++ b/vision_niah_d/needle_datasets/dataset.json @@ -0,0 +1,27 @@ +[ + { + "path": "zoo.png", + "prompt": "\nFind the frame with the word 'zoo'. What is the animal outside the zoo shop?\nA. lion\nB. tiger\nC. horse\nD. dog\nAnswer with the option's letter from the given choices directly.", + "answer": "B" + }, + { + "path": "sora_balloon.png", + "prompt": "\nFind the frame of a couple in a wedding. In side the frame, there is a balloon on the bridegroom's head. What is the color of that ballon?\nA. Yellow\nB. Red\nC. Blue\nD. White\nPlease provide your answer by stating the letter followed by the full option.", + "answer": "A" + }, + { + "path": "selenium_green.jpg", + "prompt": "\nFind the frame with the image of Selenium tablets. How many mg does each tablet contain?\nAnswer the question using a single word or phrase.", + "answer": "200" + }, + { + "path": "panda_scientist.png", + "prompt": "\nFind the frame of a scientist. The scientist is a...\nA. Bird\nB. Elephant\nC. Panda\nD. Dog\nPlease provide your answer by stating the letter followed by the full option.", + "answer": "C" + }, + { + "path": "teddy_bear_times_square.png", + "prompt": "\nFind the frame of a teddy bear. Where is this teddy bear?\nA. Times Square\nB. Eiffel Tower\nC. Taj Mahal\nD. Sydney Opera House\nPlease provide your answer by stating the letter followed by the full option.", + "answer": "A" + } +] \ No newline at end of file diff --git a/vision_niah_d/needle_datasets/dataset_interrupt.json b/vision_niah_d/needle_datasets/dataset_interrupt.json new file mode 100644 index 0000000000000000000000000000000000000000..e5c54607d3018b901167a019ec3c3efa85845bd1 --- /dev/null +++ b/vision_niah_d/needle_datasets/dataset_interrupt.json @@ -0,0 +1,27 @@ +[ + { + "path": "zoo_interrupt.png", + "prompt": "\nFind the frame with the word 'zoo'. What is the animal outside the zoo shop?\nA. lion\nB. tiger\nC. horse\nD. dog\nAnswer with the option's letter from the given choices directly.", + "answer": "B" + }, + { + "path": "sora_balloon_interrupt.png", + "prompt": "\nFind the frame of a couple in a wedding. In side the frame, there is a balloon on the bridegroom's head. What is the color of that ballon?\nA. Yellow\nB. Red\nC. Blue\nD. White\nPlease provide your answer by stating the letter followed by the full option.", + "answer": "A" + }, + { + "path": "selenium_green_interrupt.png", + "prompt": "\nFind the frame with the image of Selenium tablets. How many mg does each tablet contain?\nAnswer the question using a single word or phrase.", + "answer": "200" + }, + { + "path": "panda_scientist_interrupt.png", + "prompt": "\nFind the frame of a scientist. The scientist is a...\nA. Bird\nB. Elephant\nC. Panda\nD. Dog\nPlease provide your answer by stating the letter followed by the full option.", + "answer": "C" + }, + { + "path": "teddy_bear_times_square_interrupt.png", + "prompt": "\nFind the frame of a teddy bear. Where is this teddy bear?\nA. Times Square\nB. Eiffel Tower\nC. Taj Mahal\nD. 
Sydney Opera House\nPlease provide your answer by stating the letter followed by the full option.", + "answer": "A" + } +] \ No newline at end of file diff --git a/vision_niah_d/needle_datasets/git_placeholder b/vision_niah_d/needle_datasets/git_placeholder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vision_niah_d/needle_datasets/images/astronaut.png b/vision_niah_d/needle_datasets/images/astronaut.png new file mode 100644 index 0000000000000000000000000000000000000000..9cb6d4a04f108a2af2e66206d985022efea94c41 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/astronaut.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa2edd0f58161729b0fc708ba2132a616d99d8f3c336cdf6de7ef0cf2ae4701a +size 2112952 diff --git a/vision_niah_d/needle_datasets/images/construction_site.png b/vision_niah_d/needle_datasets/images/construction_site.png new file mode 100644 index 0000000000000000000000000000000000000000..a141a4044b1be333e18b7cd3b6bd36c5446ca503 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/construction_site.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:356159a446e46ce0909fdd791b96d12e38bbf6d2ad84e2e4309b779004cc9655 +size 2281748 diff --git a/vision_niah_d/needle_datasets/images/dolphin.png b/vision_niah_d/needle_datasets/images/dolphin.png new file mode 100644 index 0000000000000000000000000000000000000000..fa2a992972391465835f8721af75c2842ca81b76 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/dolphin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8865563b7736314c90fe285e7df945c6338e08b03cd99c329985489f10d751f +size 4548556 diff --git a/vision_niah_d/needle_datasets/images/llava-next.png b/vision_niah_d/needle_datasets/images/llava-next.png new file mode 100644 index 0000000000000000000000000000000000000000..447e8070802c15a12912682d67c0da3a59e1bbf0 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/llava-next.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec1ec34bc2982c72a4a39d39fedaa77b210cedfb5b9bb2a8b22c6dcf0f569e26 +size 1997880 diff --git a/vision_niah_d/needle_datasets/images/panda_scientist.png b/vision_niah_d/needle_datasets/images/panda_scientist.png new file mode 100644 index 0000000000000000000000000000000000000000..37f4faab638c90e01ad45e4b3cca73faf9dbbe2c --- /dev/null +++ b/vision_niah_d/needle_datasets/images/panda_scientist.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb19d41d4d6fe9e011ec19f3df132d6c241dc0fccf3cf29d3bdc86d50a37ecf4 +size 4914202 diff --git a/vision_niah_d/needle_datasets/images/panda_scientist_interrupt.png b/vision_niah_d/needle_datasets/images/panda_scientist_interrupt.png new file mode 100644 index 0000000000000000000000000000000000000000..56a33992359de16224aed6c55ae1eb7f38db4d27 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/panda_scientist_interrupt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be6fcf8c95a9d9be6bcd690728b2ae02d9d6184c73f31d9a94987bcd75c9dbf6 +size 71594 diff --git a/vision_niah_d/needle_datasets/images/selenium_green.jpg b/vision_niah_d/needle_datasets/images/selenium_green.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74a1579d8869017f75d816fb9f25015d1f0f77b1 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/selenium_green.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:64ad70bd814cd4398bd1768deab93c200a44743ca8ff53b7e633b68cfee1a94f +size 112411 diff --git a/vision_niah_d/needle_datasets/images/selenium_green_interrupt.png b/vision_niah_d/needle_datasets/images/selenium_green_interrupt.png new file mode 100644 index 0000000000000000000000000000000000000000..74d93dde3a1350f1ffc10c2d0f421267df92f1f5 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/selenium_green_interrupt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d563dac3b7e2e069cfeb3f25a8447aec8a63d164d3a8a16fb0343053bdaa71 +size 44602 diff --git a/vision_niah_d/needle_datasets/images/sora_balloon.png b/vision_niah_d/needle_datasets/images/sora_balloon.png new file mode 100644 index 0000000000000000000000000000000000000000..f5067009fb3a2541e444894d754f5e0d6c400065 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/sora_balloon.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c9bfd8d732b27dad23b9f93be38e18d3f5e4cf281933c5b1a401602d42b8c3d +size 1508447 diff --git a/vision_niah_d/needle_datasets/images/sora_balloon_interrupt.png b/vision_niah_d/needle_datasets/images/sora_balloon_interrupt.png new file mode 100644 index 0000000000000000000000000000000000000000..ca251803d21a663b42c144b9213853d335ba5665 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/sora_balloon_interrupt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1443d2fb3526d21ae891a8dc090e770b9166c7375822bf026069bd9888dbf37 +size 39612 diff --git a/vision_niah_d/needle_datasets/images/teddy_bear_times_square.png b/vision_niah_d/needle_datasets/images/teddy_bear_times_square.png new file mode 100644 index 0000000000000000000000000000000000000000..0f296ef36c95049d1ba964e77ae0a0809a17a1f6 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/teddy_bear_times_square.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51b6e1321ab1230d91eb8d8562195751a5583b41665ac7957122367cde9b221d +size 5461199 diff --git a/vision_niah_d/needle_datasets/images/teddy_bear_times_square_interrupt.png b/vision_niah_d/needle_datasets/images/teddy_bear_times_square_interrupt.png new file mode 100644 index 0000000000000000000000000000000000000000..4a632309bfd97831918b480fcfd1f19baa2f3288 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/teddy_bear_times_square_interrupt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9575a239825d4ea064a78edac78905fbdb031051b332d905a44253214bfc44ef +size 121392 diff --git a/vision_niah_d/needle_datasets/images/ucsd.jpeg b/vision_niah_d/needle_datasets/images/ucsd.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..750fe7573826bdfe0e1685209079069325a01d16 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/ucsd.jpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3c96c65dbaed8e2bd9e8cc8dd4bf264f31109e3a55c108446569496058dd315 +size 328801 diff --git a/vision_niah_d/needle_datasets/images/ucsd_interrupt.png b/vision_niah_d/needle_datasets/images/ucsd_interrupt.png new file mode 100644 index 0000000000000000000000000000000000000000..1092071e8fd5de313c6172ff7a97c35fba4d4797 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/ucsd_interrupt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb810841a1d0d6b83da5a9984079d565aa600e373968982010f8d551c682fca2 +size 84954 diff --git a/vision_niah_d/needle_datasets/images/zoo.png b/vision_niah_d/needle_datasets/images/zoo.png new file mode 
100644 index 0000000000000000000000000000000000000000..0bd6418676ae344684aac9f1c5716b5171f3a660 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/zoo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5fb103488d7a043baad71d001b7a62124a64b547192ffa6d112dd38abec40b3 +size 2481456 diff --git a/vision_niah_d/needle_datasets/images/zoo_interrupt.png b/vision_niah_d/needle_datasets/images/zoo_interrupt.png new file mode 100644 index 0000000000000000000000000000000000000000..380fa9015d5391983b7cc114829c4fd3786cd4b2 --- /dev/null +++ b/vision_niah_d/needle_datasets/images/zoo_interrupt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5ef104d4555027cab2f4161970137da1c49cbb54258a6a364496df8af88c350 +size 46562 diff --git a/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json b/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json new file mode 100644 index 0000000000000000000000000000000000000000..5670c1a857c0c16c702d0d25aa8691dbb587f92b --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json @@ -0,0 +1,452 @@ +[ + { + "Num. Frame": 100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 700, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 700, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 700, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 900, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 900, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. 
Frame": 1100, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1300, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 1500, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 1500, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 1500, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 1500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 1700, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 1700, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 1700, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 1700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 1900, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 1900, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 1900, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 1900, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 2100, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 2100, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 2100, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 2100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 2300, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 2300, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 2300, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 2300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. 
Frame": 2900, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 2900, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 2900, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 2900, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 2900, + "Frame Depth": 100.0, + "Score": 1.0 + } +] \ No newline at end of file diff --git a/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt b/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ab86c2e2902f713e7989048affdf0afb52ea12d --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt @@ -0,0 +1 @@ +Average Accuracy: 0.7866666666666665 diff --git a/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png b/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..e38153919b91d886357d2a9a450bf09830c17246 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7acaa6cf05408494c0e97a80065b3ab5250894b84eb5732f74df189b20cd689 +size 42407 diff --git a/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/all_accuracies.json b/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/all_accuracies.json new file mode 100644 index 0000000000000000000000000000000000000000..345eba17b12914e5e15ef4fbf62ff78b68cdd290 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/all_accuracies.json @@ -0,0 +1,452 @@ +[ + { + "Num. Frame": 100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 20.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 40.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 60.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 80.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 20.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 40.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 60.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 80.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 20.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 40.0, + "Score": 1.0 + }, + { + "Num. 
Frame": 700, + "Frame Depth": 60.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 80.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 900, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 900, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 20.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 40.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 60.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 80.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1300, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 20.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 40.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 60.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 80.0, + "Score": 1.0 + }, + { + "Num. Frame": 1500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 1700, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 1700, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 1700, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 1700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 1900, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 1900, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 1900, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 1900, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2100, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2100, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2100, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2300, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2300, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2300, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. 
Frame": 2500, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 2900, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 2900, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 2900, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 2900, + "Frame Depth": 100.0, + "Score": 1.0 + } +] \ No newline at end of file diff --git a/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt b/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt new file mode 100644 index 0000000000000000000000000000000000000000..171e02c589d3a39e4f3561a649fffb709ed78e46 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt @@ -0,0 +1 @@ +Average Accuracy: 0.9111111111111111 diff --git a/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/heatmap.png b/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..ba42fa719dc8fae93e709ee90b3d25196aa1d0ba --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94268d656f4445e6605bacc60e33724e59c0424cff96d20867574459d9481f26 +size 42309 diff --git a/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json b/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json new file mode 100644 index 0000000000000000000000000000000000000000..c81a0ce7cc460ecee287413c3eede002e9390eb5 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json @@ -0,0 +1,452 @@ +[ + { + "Num. Frame": 100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. 
Frame": 300, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 0.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 500, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 500, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 500, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 0.0, + "Score": 0.2 + }, + { + "Num. Frame": 700, + "Frame Depth": 20.0, + "Score": 0.2 + }, + { + "Num. Frame": 700, + "Frame Depth": 40.0, + "Score": 0.2 + }, + { + "Num. Frame": 700, + "Frame Depth": 60.0, + "Score": 0.2 + }, + { + "Num. Frame": 700, + "Frame Depth": 80.0, + "Score": 0.2 + }, + { + "Num. Frame": 700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 900, + "Frame Depth": 0.0, + "Score": 0.8 + }, + { + "Num. Frame": 900, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 0.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 100.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 1300, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 100.0, + "Score": 0.6 + }, + { + "Num. Frame": 1500, + "Frame Depth": 0.0, + "Score": 0.4 + }, + { + "Num. Frame": 1500, + "Frame Depth": 20.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 40.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 60.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 80.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 100.0, + "Score": 0.2 + }, + { + "Num. Frame": 1700, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 0.0, + "Score": 0.2 + }, + { + "Num. Frame": 1900, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. 
Frame": 1900, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. 
Frame": 2900, + "Frame Depth": 100.0, + "Score": 0.0 + } +] \ No newline at end of file diff --git a/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt b/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c20e4c0ab9a095387d0432500cd45e07754bfed --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt @@ -0,0 +1 @@ +Average Accuracy: 0.29333333333333333 diff --git a/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png b/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..6d25a7ce1a56e47474710b776aabe25e76ba1f69 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5af1e9e2b48fed6fa209ace235c1d5c63715054c999289219263faaca76f5d97 +size 42561 diff --git a/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json b/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json new file mode 100644 index 0000000000000000000000000000000000000000..ee1fc70eab505e672e5b0e3a96e8011b664483c8 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json @@ -0,0 +1,452 @@ +[ + { + "Num. Frame": 100, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 100, + "Frame Depth": 20.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 40.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 60.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 80.0, + "Score": 0.8 + }, + { + "Num. Frame": 100, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 0.0, + "Score": 1.0 + }, + { + "Num. Frame": 300, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 300, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 300, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 300, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 300, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 500, + "Frame Depth": 0.0, + "Score": 0.8 + }, + { + "Num. Frame": 500, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 500, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 700, + "Frame Depth": 0.0, + "Score": 0.4 + }, + { + "Num. Frame": 700, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 700, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 700, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 700, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 700, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 900, + "Frame Depth": 0.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 20.0, + "Score": 0.6 + }, + { + "Num. 
Frame": 900, + "Frame Depth": 40.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 60.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 80.0, + "Score": 0.6 + }, + { + "Num. Frame": 900, + "Frame Depth": 100.0, + "Score": 1.0 + }, + { + "Num. Frame": 1100, + "Frame Depth": 0.0, + "Score": 0.6 + }, + { + "Num. Frame": 1100, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 1100, + "Frame Depth": 100.0, + "Score": 0.8 + }, + { + "Num. Frame": 1300, + "Frame Depth": 0.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 20.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 40.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 60.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 80.0, + "Score": 0.4 + }, + { + "Num. Frame": 1300, + "Frame Depth": 100.0, + "Score": 0.6 + }, + { + "Num. Frame": 1500, + "Frame Depth": 0.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 20.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 40.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 60.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 80.0, + "Score": 0.2 + }, + { + "Num. Frame": 1500, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 1700, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 1900, + "Frame Depth": 0.0, + "Score": 0.2 + }, + { + "Num. Frame": 1900, + "Frame Depth": 20.0, + "Score": 0.2 + }, + { + "Num. Frame": 1900, + "Frame Depth": 40.0, + "Score": 0.2 + }, + { + "Num. Frame": 1900, + "Frame Depth": 60.0, + "Score": 0.2 + }, + { + "Num. Frame": 1900, + "Frame Depth": 80.0, + "Score": 0.2 + }, + { + "Num. Frame": 1900, + "Frame Depth": 100.0, + "Score": 0.2 + }, + { + "Num. Frame": 2100, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2100, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2300, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2500, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. 
Frame": 2500, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2700, + "Frame Depth": 100.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 0.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 20.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 40.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 60.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 80.0, + "Score": 0.0 + }, + { + "Num. Frame": 2900, + "Frame Depth": 100.0, + "Score": 0.0 + } +] \ No newline at end of file diff --git a/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt b/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt new file mode 100644 index 0000000000000000000000000000000000000000..b510b5266e107101ae322472d11d8ccda19089ab --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt @@ -0,0 +1 @@ +Average Accuracy: 0.31777777777777777 diff --git a/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png b/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..130954383e6b20814cdd4850dbd359fb7b24a672 --- /dev/null +++ b/vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c382372c23f66c2707683b72e252ae080c2f953a9c77f525a72730deb470565b +size 42578 diff --git a/vision_niah_d/produce_haystack_embedding.py b/vision_niah_d/produce_haystack_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe7d3d75f612c0ae0fd5fb590655278cfdea6ce --- /dev/null +++ b/vision_niah_d/produce_haystack_embedding.py @@ -0,0 +1,142 @@ +from qwen_vl_utils import process_vision_info +from decord import VideoReader, cpu +import argparse +import os +import numpy as np +from tqdm import tqdm +import torch +import transformers +import math +from PIL import Image +from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor +from torchvision import io, transforms +from torchvision.transforms import InterpolationMode +IMAGE_FACTOR = 28 +MIN_PIXELS = 144 * 28 * 28 +MAX_PIXELS = 144 * 28 * 28 +MAX_RATIO = 200 +def load_video_batches(video_path, batch_size): + global args + vr = VideoReader(video_path, ctx=cpu(0)) + total_frame_num = len(vr) + fps = round(vr.get_avg_fps()) + frame_idx = [i for i in range(0, len(vr), fps)] + for start_idx in range(0, len(frame_idx), batch_size): + end_idx = min(start_idx + batch_size, total_frame_num) + frame_indices = frame_idx[start_idx:end_idx] + batch_frames = vr.get_batch(frame_indices).asnumpy() + batch_frames = torch.tensor(batch_frames).permute(0, 3, 1, 2) + # import pdb; pdb.set_trace() + nframes, _, height, width = batch_frames.shape + # if torch.unique(batch_frames).numel() == 1: + # batch_frames.fill_(args.v) + # print(torch.unique(batch_frames).item()) + 
# resize every frame to a fixed 252x448 (a 9x16 grid of 28-pixel units, i.e. MIN_PIXELS == MAX_PIXELS == 144 * 28 * 28); smart_resize below is kept for reference but not called here + resized_height, resized_width = 252, 448 + # resized_height, resized_width = smart_resize( + # height, + # width, + # factor=IMAGE_FACTOR, + # min_pixels=MIN_PIXELS, + # max_pixels=MAX_PIXELS, + # ) + batch_frames = transforms.functional.resize( + batch_frames, + [resized_height, resized_width], + interpolation=InterpolationMode.BICUBIC, + antialias=True, + ).float() + + yield batch_frames + 
+def round_by_factor(number: int, factor: int) -> int: + """Returns the closest integer to 'number' that is divisible by 'factor'.""" + return round(number / factor) * factor + + +def ceil_by_factor(number: int, factor: int) -> int: + """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'.""" + return math.ceil(number / factor) * factor + + +def floor_by_factor(number: int, factor: int) -> int: + """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'.""" + return math.floor(number / factor) * factor +def smart_resize( + height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS +) -> tuple[int, int]: + """ + Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + + 3. The aspect ratio of the image is maintained as closely as possible. + """ + if max(height, width) / min(height, width) > MAX_RATIO: + raise ValueError( + f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}" + ) + h_bar = max(factor, round_by_factor(height, factor)) + w_bar = max(factor, round_by_factor(width, factor)) + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = floor_by_factor(height / beta, factor) + w_bar = floor_by_factor(width / beta, factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = ceil_by_factor(height * beta, factor) + w_bar = ceil_by_factor(width * beta, factor) + return h_bar, w_bar + 
+def main(args): + video_path = args.video_path + model_path = args.model + model_name = "llava_qwen" + + model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, + device_map="auto", + torch_dtype=torch.bfloat16, + attn_implementation="flash_attention_2" + ) + processor = AutoProcessor.from_pretrained("/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct") + del model.model.layers + # Process video in batches + batch_size = 32 + total_batches = (args.sampled_frames_num + batch_size - 1) // batch_size + image_feature_list = [] + if args.add_newline_token: + newline_token_embedding = model.model.image_newline + with torch.inference_mode(): + for i, video_batch in tqdm(enumerate(load_video_batches(video_path, batch_size)), total=total_batches, desc="Processing Video Batches"): + v_test = processor.image_processor(images=None, videos=video_batch) + merge_length = processor.image_processor.merge_size**2 + pixel_values_videos,video_grid_thw=torch.from_numpy(v_test['pixel_values_videos']), torch.from_numpy(v_test['video_grid_thw']).to(model.device) + # if i > 30: + # import pdb; pdb.set_trace() + print(video_grid_thw) + # import pdb; pdb.set_trace() + pixel_values_videos = pixel_values_videos.type(model.visual.get_dtype()).to(model.device) + video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw).to(model.device) + + print(video_embeds.shape) + if args.add_newline_token: + # append the learned newline embedding to the cached visual tokens + video_embeds = torch.cat([video_embeds, newline_token_embedding.unsqueeze(0).expand(video_embeds.shape[0], 1, -1)], dim=1) + image_feature_list.append(video_embeds.to(torch.bfloat16).to("cpu")) + if i > total_batches: + break + image_feature_list = torch.cat(image_feature_list, dim=0) + os.makedirs(args.output_dir, exist_ok=True) + torch.save(image_feature_list, f"{args.output_dir}/video_embeddings.pt") + 
+if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model", type=str, default="/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct") + # parser.add_argument("--v", type=int, default=255) + parser.add_argument("--video_path", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/LongVA/asset/videos/movie.mp4") + parser.add_argument("--sampled_frames_num", type=int, default=6000) + parser.add_argument("--output_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/haystack_vicuna_embeddings_6000frames-tune_projector") + parser.add_argument("--pooling_size", type=int, default=0) + parser.add_argument("--add_newline_token", action="store_true") + args = parser.parse_args() + main(args) diff --git a/vision_niah_d/produce_needle_embedding.py b/vision_niah_d/produce_needle_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..3e09d4b6deae6d36648fdfc751fcf241658d7a4e --- /dev/null +++ b/vision_niah_d/produce_needle_embedding.py @@ -0,0 +1,163 @@ +from qwen_vl_utils import process_vision_info +from decord import VideoReader, cpu +import argparse +import numpy as np +from tqdm import tqdm +import torch +import transformers +import math +from PIL import Image +from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor +from torchvision import io, transforms +from torchvision.transforms import InterpolationMode +import os +import json +import base64 +import requests +from io import BytesIO  # base64/requests/BytesIO are needed by fetch_image below +IMAGE_FACTOR = 28 +MIN_PIXELS = 144 * 28 * 28 +MAX_PIXELS = 144 * 28 * 28 +MAX_RATIO = 200 + +def round_by_factor(number: int, factor: int) -> int: + """Returns the closest integer to 'number' that is divisible by 'factor'.""" + return round(number / factor) * factor + + +def ceil_by_factor(number: int, factor: int) -> int: + """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'.""" + return math.ceil(number / factor) * factor + + +def floor_by_factor(number: int, factor: int) -> int: + """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'.""" + return math.floor(number / factor) * factor +def smart_resize( + height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS +) -> tuple[int, int]: + """ + Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + + 3. The aspect ratio of the image is maintained as closely as possible.
+ """ + if max(height, width) / min(height, width) > MAX_RATIO: + raise ValueError( + f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}" + ) + h_bar = max(factor, round_by_factor(height, factor)) + w_bar = max(factor, round_by_factor(width, factor)) + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = floor_by_factor(height / beta, factor) + w_bar = floor_by_factor(width / beta, factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = ceil_by_factor(height * beta, factor) + w_bar = ceil_by_factor(width * beta, factor) + return h_bar, w_bar + +def read_json_file(file_path): + """ + 读取JSON文件并返回数据作为字典。 + + 参数: + file_path (str): JSON文件的路径。 + + 返回: + dict: JSON文件中的数据。 + """ + try: + # 打开文件并读取数据 + with open(file_path, 'r', encoding='utf-8') as file: + # 将JSON数据解析为字典 + data = json.load(file) + return data + except FileNotFoundError: + print(f"The file {file_path} was not found.") + except json.JSONDecodeError: + print(f"Error decoding JSON from file {file_path}.") + except Exception as e: + print(f"An error occurred: {e}") + +def fetch_image(ele, size_factor: int = IMAGE_FACTOR) -> Image.Image: + if "image" in ele: + image = ele["image"] + else: + image = ele["image_url"] + image_obj = None + if isinstance(image, Image.Image): + image_obj = image + elif image.startswith("http://") or image.startswith("https://"): + image_obj = Image.open(requests.get(image, stream=True).raw) + elif image.startswith("file://"): + image_obj = Image.open(image[7:]) + elif image.startswith("data:image"): + if "base64," in image: + _, base64_data = image.split("base64,", 1) + data = base64.b64decode(base64_data) + image_obj = Image.open(BytesIO(data)) + else: + image_obj = Image.open(image) + if image_obj is None: + raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}") + image = image_obj.convert("RGB") + ## resize + if "resized_height" in ele and "resized_width" in ele: + resized_height, resized_width = 252, 448 + # resized_height, resized_width = smart_resize( + # ele["resized_height"], + # ele["resized_width"], + # factor=size_factor, + # ) + else: + width, height = image.size + min_pixels = ele.get("min_pixels", MIN_PIXELS) + max_pixels = ele.get("max_pixels", MAX_PIXELS) + # resized_height, resized_width = smart_resize( + # height, + # width, + # factor=size_factor, + # min_pixels=min_pixels, + # max_pixels=max_pixels, + # ) + resized_height, resized_width = 252, 448 + image = image.resize((resized_width, resized_height)) + + return image +def main(args): + model_path = args.model + model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, + device_map="auto", + torch_dtype=torch.bfloat16, + attn_implementation="flash_attention_2" + ) + processor = AutoProcessor.from_pretrained("/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct") + del model.model.layers + # dataset = load_dataset(args.needle_dataset)["test"] + # dataset = load_dataset('json', '/mnt/petrelfs/weixilin/projects/MLLM/LongVA/vision_niah/needle_datasets/dataset.json') + dataset = read_json_file(args.needle_dataset) + for index, instance in enumerate(dataset): + + # image = instance["image"].convert("RGB") + img = fetch_image({"image": os.path.join('/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/images', instance['path']), "resized_height": 252, "resized_width": 448}) + image_single = 
processor.image_processor(images=[img], videos=None) + merge_length = processor.image_processor.merge_size**2 + pixel_values, image_grid_thw=torch.from_numpy(image_single['pixel_values']), torch.from_numpy(image_single['image_grid_thw']).to(model.device) + # import pdb; pdb.set_trace() + pixel_values = pixel_values.type(model.visual.get_dtype()).to(model.device) + image_embed = model.visual(pixel_values, grid_thw=image_grid_thw).to(model.device) + print(image_embed.shape) + os.makedirs(args.output_dir, exist_ok=True) + torch.save(image_embed, f"{args.output_dir}/{index}.pt") + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model", type=str, default="/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct") + parser.add_argument("--needle_dataset", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/dataset_change_format_debug.json") + parser.add_argument("--output_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings_144tokens-tune_projector_interrupt_debug") + args = parser.parse_args() + main(args)
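
produce_haystack_embedding.py caches the haystack video features as a single video_embeddings.pt tensor, and produce_needle_embedding.py writes one {index}.pt tensor per needle image. The snippet below is a minimal sketch of how those cached tensors could be loaded and sanity-checked before running the NIAH evaluation; the paths are hypothetical placeholders standing in for whatever --output_dir values were actually used, and the expected [num_tokens, hidden_size] shapes are an assumption based on the model.visual(...) outputs the scripts print.

import torch

# Placeholder paths; substitute the --output_dir values passed to the two scripts above.
haystack_path = "data/haystack_embeddings/video_embeddings.pt"
needle_path = "data/needle_embeddings/0.pt"

# Both scripts save bfloat16 tensors (the haystack on CPU, the needles on the model device),
# so map everything to CPU for a quick inspection.
haystack = torch.load(haystack_path, map_location="cpu")  # expected: [num_video_tokens, hidden_size]
needle = torch.load(needle_path, map_location="cpu")      # expected: [num_image_tokens, hidden_size]

print(haystack.shape, haystack.dtype)
print(needle.shape, needle.dtype)

# A needle can only be spliced into the haystack if both share the backbone's hidden size.
assert haystack.shape[-1] == needle.shape[-1], "hidden sizes differ; were both produced by the same model?"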