diff --git "a/log_node30.txt" "b/log_node30.txt" new file mode 100644--- /dev/null +++ "b/log_node30.txt" @@ -0,0 +1,34095 @@ ++ echo Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node30.txt +Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node30.txt ++ export ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/30 ++ ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/30 ++ mkdir -p /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/30 ++ DATA_PATH=/local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml ++ TOKENIZER_PATH=/data_4/models/Qwen/Qwen2.5-14B-Instruct/ ++ CKPT_LOAD_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ ++ VIT_CKPT_LOAD_DIR=/ ++ CKPT_SAVE_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// ++ rsync -avh /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743/ +sending incremental file list + +sent 71 bytes received 12 bytes 166.00 bytes/sec +total size is 23.84K speedup is 287.17 ++ cd /local_disk/cognitron_vl/ ++ rm -fr datasets ++ mkdir -p datasets ++ ln -s /data/data/ datasets/CV ++ ln -s /data/data/LLM datasets/LLM ++ ln -s /data/data/LMM datasets/LMM ++ source /local_disk/cognitron_vl//scripts/set_env_mg_npu.sh +++ source /usr/local/Ascend/driver/bin/setenv.bash ++++ DEP_INFO_FILE=/etc/ascend_install.info ++++ [[ -f /etc/ascend_install.info ]] ++++ . 
/etc/ascend_install.info ++++ DRV_LIB64_COMMON_LDPATH=/driver/lib64/common ++++ DRV_LIB64_DRV_LDPATH=/driver/lib64/driver ++++ DRV_LIB64_LDPATH=/driver/lib64 ++++ export LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin +++ source /usr/local/Ascend/ascend-toolkit/set_env.sh ++++ export LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ 
LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest +++++ arch ++++ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export 
LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++++ PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ 
PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin
++++ export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest
++++ ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest
++++ export ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp
++++ ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp
++++ export TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit
++++ TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit
++++ export ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest
++++ ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest
+++ export HCCL_CONNECT_TIMEOUT=7200
+++ HCCL_CONNECT_TIMEOUT=7200
+++ export HCCL_EXEC_TIMEOUT=7200
+++ HCCL_EXEC_TIMEOUT=7200
+++ export COMBINED_ENABLE=1
+++ COMBINED_ENABLE=1
+++ export MULTI_STREAM_MEMORY_REUSE=1
+++ MULTI_STREAM_MEMORY_REUSE=1
+++ export HCCL_RDMA_TC=160
+++ HCCL_RDMA_TC=160
+++ export HCCL_RDMA_SL=5
+++ HCCL_RDMA_SL=5
+++ export HCCL_INTRA_PCIE_ENABLE=0
+++ HCCL_INTRA_PCIE_ENABLE=0
+++ export HCCL_INTRA_ROCE_ENABLE=1
+++ HCCL_INTRA_ROCE_ENABLE=1
+++ export HCCL_RDMA_TIMEOUT=20
+++ HCCL_RDMA_TIMEOUT=20
+++ export INF_NAN_MODE_ENABLE=1
+++ INF_NAN_MODE_ENABLE=1
+++ export DISTRIBUTED_BACKEND=hccl
+++ DISTRIBUTED_BACKEND=hccl
+++ export ASCEND_LAUNCH_BLOCKING=0
+++ ASCEND_LAUNCH_BLOCKING=0
+++ export ASCEND_SLOG_PRINT_TO_STDOUT=0
+++ ASCEND_SLOG_PRINT_TO_STDOUT=0
+++ export ASCEND_GLOBAL_LOG_LEVEL=3
+++ ASCEND_GLOBAL_LOG_LEVEL=3
+++ export ASCEND_GLOBAL_EVENT_ENABLE=0
+++ ASCEND_GLOBAL_EVENT_ENABLE=0
+++ export TASK_QUEUE_ENABLE=1
+++ TASK_QUEUE_ENABLE=1
+++ export PTCOPY_ENABLE=1
+++ PTCOPY_ENABLE=1
+++ export COMBINED_ENABLE=1
+++ COMBINED_ENABLE=1
+++ export DYNAMIC_OP=ADD#MUL
+++ DYNAMIC_OP=ADD#MUL
+++ export HCCL_WHITELIST_DISABLE=1
+++ HCCL_WHITELIST_DISABLE=1
+++ export HCCL_CONNECT_TIMEOUT=7200
+++ HCCL_CONNECT_TIMEOUT=7200
+++ export HCCL_WHITELIST_DISABLE=1
+++ HCCL_WHITELIST_DISABLE=1
+++ export CUDA_DEVICE_MAX_CONNECTIONS=1
+++ CUDA_DEVICE_MAX_CONNECTIONS=1
+++ pip3 install --no-index --find-links=/data/software/ -r requirements_npu.txt
+Looking in links: /data/software/
+Processing data/software/expecttest-0.2.1-py3-none-any.whl (from -r requirements_npu.txt (line 1))
+Requirement already satisfied: peft in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 2)) (0.7.0)
+Processing data/software/XlsxWriter-3.2.0-py3-none-any.whl (from -r requirements_npu.txt (line 3))
+Requirement already satisfied: termcolor in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 4)) (2.4.0)
+Requirement already satisfied: tabulate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 5)) (0.9.0)
+Processing data/software/tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 6))
+Requirement already satisfied: matplotlib in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 7)) (3.7.5)
+Processing 
data/software/datasets-3.0.0-py3-none-any.whl (from -r requirements_npu.txt (line 8)) +Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 9)) (0.7.0) +Processing data/software/pybind11-2.13.6-py3-none-any.whl (from -r requirements_npu.txt (line 10)) +Requirement already satisfied: tensorboardX in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 11)) (2.6.2.2) +Processing data/software/pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 12)) +Requirement already satisfied: transformers>=4.40.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 13)) (4.40.1) +Requirement already satisfied: deepspeed>=0.14.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 14)) (0.14.5) +Processing data/software/accelerate-0.34.2-py3-none-any.whl (from -r requirements_npu.txt (line 15)) +Requirement already satisfied: timm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 16)) (0.9.16) +Processing data/software/flask-3.0.3-py3-none-any.whl (from -r requirements_npu.txt (line 17)) +Processing data/software/Flask_RESTful-0.3.10-py2.py3-none-any.whl (from -r requirements_npu.txt (line 18)) +Processing data/software/decord-0.6.0-py3-none-manylinux2010_x86_64.whl (from -r requirements_npu.txt (line 19)) +Processing data/software/natsort-8.4.0-py3-none-any.whl (from -r requirements_npu.txt (line 20)) +Requirement already satisfied: numpy>=1.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (1.24.4) +Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (23.2) +Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.9.8) +Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.4.1) +Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (2.1.0+cpu) +Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (4.66.2) +Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.4.2) +Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.20.3) +Requirement already satisfied: regex>=2022.1.18 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2023.12.25) +Requirement already satisfied: requests>=2.26.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2.31.0) +Requirement already satisfied: contourpy>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.1.1) +Requirement already satisfied: cycler>=0.10 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (0.12.1) +Requirement already 
satisfied: fonttools>=4.22.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (4.49.0) +Requirement already satisfied: kiwisolver>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.4.5) +Requirement already satisfied: pillow>=6.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (10.2.0) +Requirement already satisfied: pyparsing>=2.3.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (3.1.1) +Requirement already satisfied: python-dateutil>=2.7 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (2.8.2) +Requirement already satisfied: importlib-resources>=3.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (6.1.2) +Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.13.1) +Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.3.7) +Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2.0.3) +Processing data/software/requests-2.32.3-py3-none-any.whl (from tiktoken->-r requirements_npu.txt (line 6)) +Processing data/software/tqdm-4.67.1-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.4.1) +Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.70.15) +Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2023.10.0) +Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.9.3) +Processing data/software/huggingface_hub-0.26.2-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: protobuf>=3.20 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tensorboardX->-r requirements_npu.txt (line 11)) (4.25.3) +Requirement already satisfied: tokenizers<0.20,>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers>=4.40.1->-r requirements_npu.txt (line 13)) (0.19.1) +Requirement already satisfied: hjson in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (3.1.0) +Requirement already satisfied: ninja in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.11.1.1) +Requirement already satisfied: nvidia-ml-py in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (12.560.30) +Requirement already satisfied: py-cpuinfo in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt 
(line 14)) (9.0.0) +Requirement already satisfied: pydantic in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.10.15) +Processing data/software/safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: torchvision in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from timm->-r requirements_npu.txt (line 16)) (0.16.0) +Requirement already satisfied: Werkzeug>=3.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.0.1) +Requirement already satisfied: Jinja2>=3.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.1.3) +Processing data/software/itsdangerous-2.2.0-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) +Requirement already satisfied: click>=8.1.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (8.1.7) +Processing data/software/blinker-1.8.2-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) +Requirement already satisfied: importlib-metadata>=3.6.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (7.0.1) +Processing data/software/aniso8601-9.0.1-py2.py3-none-any.whl (from flask_restful->-r requirements_npu.txt (line 18)) +Requirement already satisfied: six>=1.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (1.16.0) +Requirement already satisfied: pytz in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (2024.1) +Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.3.1) +Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (23.2.0) +Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.4.1) +Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (6.0.5) +Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.9.4) +Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (4.0.3) +Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft->-r requirements_npu.txt (line 2)) (4.10.0) +Requirement already satisfied: zipp>=0.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from importlib-metadata>=3.6.0->flask->-r requirements_npu.txt (line 17)) (3.17.0) +Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from Jinja2>=3.1.2->flask->-r requirements_npu.txt (line 17)) (2.1.5) +Requirement already satisfied: charset-normalizer<4,>=2 in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.3.2) +Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.6) +Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (1.26.18) +Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (2024.2.2) +Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.4) +Requirement already satisfied: networkx in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (3.1) +Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2024.1) +Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.3.0) +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: aniso8601, xlsxwriter, tqdm, safetensors, requests, pybind11, pyarrow, natsort, itsdangerous, expecttest, decord, blinker, tiktoken, huggingface-hub, flask, flask_restful, accelerate, datasets + Attempting uninstall: tqdm + Found existing installation: tqdm 4.66.2 + Uninstalling tqdm-4.66.2: + Successfully uninstalled tqdm-4.66.2 + Attempting uninstall: safetensors + Found existing installation: safetensors 0.4.2 + Uninstalling safetensors-0.4.2: + Successfully uninstalled safetensors-0.4.2 + Attempting uninstall: requests + Found existing installation: requests 2.31.0 + Uninstalling requests-2.31.0: + Successfully uninstalled requests-2.31.0 + Attempting uninstall: pyarrow + Found existing installation: pyarrow 15.0.0 + Uninstalling pyarrow-15.0.0: + Successfully uninstalled pyarrow-15.0.0 + Attempting uninstall: huggingface-hub + Found existing installation: huggingface-hub 0.20.3 + Uninstalling huggingface-hub-0.20.3: + Successfully uninstalled huggingface-hub-0.20.3 + Attempting uninstall: accelerate + Found existing installation: accelerate 0.25.0 + Uninstalling accelerate-0.25.0: + Successfully uninstalled accelerate-0.25.0 + Attempting uninstall: datasets + Found existing installation: datasets 2.16.0 + Uninstalling datasets-2.16.0: + Successfully uninstalled datasets-2.16.0 +ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. +tikit 1.8.2.240926 requires dicttoxml==1.7.4, which is not installed. +tikit 1.8.2.240926 requires docopt==0.6.2, which is not installed. +tikit 1.8.2.240926 requires future==0.18.2, which is not installed. +tikit 1.8.2.240926 requires hdfs==2.6.0, which is not installed. 
+tikit 1.8.2.240926 requires pure-sasl==0.6.2, which is not installed. +tikit 1.8.2.240926 requires py4j==0.10.7, which is not installed. +tikit 1.8.2.240926 requires PyHive[hive]==0.6.4, which is not installed. +tikit 1.8.2.240926 requires pyjwt>=2.4.0, which is not installed. +tikit 1.8.2.240926 requires requests-kerberos>=0.14.0, which is not installed. +tikit 1.8.2.240926 requires sasl==0.3.1, which is not installed. +tikit 1.8.2.240926 requires thrift==0.15.0, which is not installed. +tikit 1.8.2.240926 requires thrift-sasl>=0.1.0, which is not installed. +tikit 1.8.2.240926 requires certifi==2021.10.8, but you have certifi 2024.2.2 which is incompatible. +tikit 1.8.2.240926 requires cos-python-sdk-v5==1.9.29, but you have cos-python-sdk-v5 1.9.26 which is incompatible. +tikit 1.8.2.240926 requires idna==3.3, but you have idna 3.6 which is incompatible. +tikit 1.8.2.240926 requires prettytable==2.5.0, but you have prettytable 3.11.0 which is incompatible. +tikit 1.8.2.240926 requires urllib3==1.26.7, but you have urllib3 1.26.18 which is incompatible. +tikit 1.8.2.240926 requires wcwidth==0.2.5, but you have wcwidth 0.2.13 which is incompatible. +Successfully installed accelerate-0.34.2 aniso8601-9.0.1 blinker-1.8.2 datasets-3.0.0 decord-0.6.0 expecttest-0.2.1 flask-3.0.3 flask_restful-0.3.10 huggingface-hub-0.26.2 itsdangerous-2.2.0 natsort-8.4.0 pyarrow-17.0.0 pybind11-2.13.6 requests-2.32.3 safetensors-0.4.5 tiktoken-0.7.0 tqdm-4.67.1 xlsxwriter-3.2.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv +++ return 0 ++ MEGATRON_DIR=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ ++ MINDSPEED_DIR=/local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ ++ MODELLINK_DIR=/local_disk/cognitron_vl//third_party/ModelLink/ ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0 + Installing build dependencies: started + Installing build dependencies: finished with status 'done' + Checking if build backend supports build_editable: started + Checking if build backend supports build_editable: finished with status 'done' + Getting requirements to build editable: started + Getting requirements to build editable: finished with status 'done' + Installing backend dependencies: started + Installing backend dependencies: finished with status 'done' + Preparing editable metadata (pyproject.toml): started + Preparing editable metadata (pyproject.toml): finished with status 'done' +Building wheels for collected packages: megatron_core + Building editable for megatron_core (pyproject.toml): started + Building editable for megatron_core (pyproject.toml): finished with status 'done' + Created wheel for megatron_core: filename=megatron_core-0.6.0-0.editable-cp38-cp38-linux_x86_64.whl size=8791 sha256=1c8a73544a768ff0759eb2db03ef8e548406a6700abe057332d8072922777a16 + Stored in directory: /tmp/pip-ephem-wheel-cache-qivl7wrf/wheels/54/9c/d1/d2015aa0c34e791e64d65d19395e5a9a5528f0c63fd519b9ff +Successfully built megatron_core +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. 
A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: megatron_core +Successfully installed megatron_core-0.6.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0 + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +WARNING: Error parsing requirements for tokenizers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/tokenizers-0.19.1.dist-info/METADATA' +WARNING: Error parsing requirements for transformers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/transformers-4.40.1.dist-info/METADATA' +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: mindspeed + Running setup.py develop for mindspeed +Successfully installed mindspeed-0.6.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/ModelLink/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/ModelLink + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: numpy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.24.4) +Processing data/software/transformers-4.43.2-py3-none-any.whl (from modellink==0.0.1) +Processing data/software/transformers-stream-generator-0.0.5.tar.gz (from modellink==0.0.1) + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.4) +Requirement already satisfied: decorator in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (5.1.1) +Requirement already satisfied: scipy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.10.1) +Requirement already satisfied: sentencepiece in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.2.0) +Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) +Requirement already satisfied: datasets in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (3.0.0) +Requirement already satisfied: pybind11 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (2.13.6) +Requirement already satisfied: accelerate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.34.2) +Requirement already satisfied: six in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.16.0) +Requirement already satisfied: protobuf in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (4.25.3) +Processing data/software/peft-0.7.1-py3-none-any.whl (from modellink==0.0.1) +Requirement already satisfied: tiktoken in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) +Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (23.2) +Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.9.8) +Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.4.1) +Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (2.1.0+cpu) +Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (4.67.1) +Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.4.5) +Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.26.2) +Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (3.13.1) +Requirement already satisfied: 
regex!=2019.12.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2023.12.25) +Requirement already satisfied: requests in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2.32.3) +Processing data/software/tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from transformers==4.43.2->modellink==0.0.1) +Requirement already satisfied: pyarrow>=15.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (17.0.0) +Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.3.7) +Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (2.0.3) +Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.4.1) +Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.70.15) +Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets->modellink==0.0.1) (2023.10.0) +Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.9.3) +Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->modellink==0.0.1) (1.3.0) +Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.3.1) +Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (23.2.0) +Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.4.1) +Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (6.0.5) +Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.9.4) +Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (4.0.3) +Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft==0.7.1->modellink==0.0.1) (4.10.0) +Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.3.2) +Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.6) +Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (1.26.18) +Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (2024.2.2) +Requirement already satisfied: networkx in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1) +Requirement already satisfied: jinja2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1.3) +Requirement already satisfied: python-dateutil>=2.8.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2.8.2) +Requirement already satisfied: pytz>=2020.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) +Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) +Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from jinja2->torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (2.1.5) +Building wheels for collected packages: transformers_stream_generator + Building wheel for transformers_stream_generator (setup.py): started + Building wheel for transformers_stream_generator (setup.py): finished with status 'done' + Created wheel for transformers_stream_generator: filename=transformers_stream_generator-0.0.5-py3-none-any.whl size=12425 sha256=53a0efa1548230be4832bd2d5f76d2b932ac2ffee1961d12082c62ce27bcc265 + Stored in directory: /root/.cache/pip/wheels/56/8c/42/5381d9c36bc85f28982f4cf8f98dc44d37a6d6c04897a5cb7c +Successfully built transformers_stream_generator +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: tokenizers, transformers, transformers_stream_generator, peft, modellink + Attempting uninstall: tokenizers + Found existing installation: tokenizers 0.20.3 + Uninstalling tokenizers-0.20.3: + Successfully uninstalled tokenizers-0.20.3 + Attempting uninstall: transformers + Found existing installation: transformers 4.46.3 + Uninstalling transformers-4.46.3: + Successfully uninstalled transformers-4.46.3 + Attempting uninstall: peft + Found existing installation: peft 0.7.0 + Uninstalling peft-0.7.0: + Successfully uninstalled peft-0.7.0 + Running setup.py develop for modellink +Successfully installed modellink-0.0.1 peft-0.7.1 tokenizers-0.19.1 transformers-4.43.2 transformers_stream_generator-0.0.5 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
++ export PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:
++ PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:
++ GPUS_PER_NODE=16
++ NNODES=32
++ NODE_RANK=30
++ MASTER_PORT=34567
++ export CUDA_DEVICE_MAX_CONNECTIONS=1
++ CUDA_DEVICE_MAX_CONNECTIONS=1
++ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
++ PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
++ VISION_SEQ_LENGTH=1025
++ IMAGE_TOKEN_LENGTH=256
++ IMAGE_SIZE=448
++ VISION_MODEL_TYPE=intern_300m
++ TP=8
++ PP=1
++ CP=8
++ CP_ALGO=megatron_cp_algo
++ CP_MASK=causal
++ DISTRIBUTED_ARGS='
+ --nproc_per_node 16 --nnodes 32 --node_rank 30 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567
+'
++ GPT_ARGS='
+ --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint '
++ DATA_ARGS='
+ --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 '
++ CKPT_ARGS='
+ --load 
/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// ' ++ OUTPUT_ARGS=' + --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 ' ++ torchrun --nproc_per_node 16 --nnodes 32 --node_rank 30 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 /local_disk/cognitron_vl//lcvlm_modellink/pretrain_lcvlm.py --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 --load /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 --distributed-backend nccl +[2024-11-28 15:50:34,813] torch.distributed.run: [WARNING] +[2024-11-28 15:50:34,813] torch.distributed.run: [WARNING] ***************************************** +[2024-11-28 15:50:34,813] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune 
the variable for optimal performance in your application as needed. +[2024-11-28 15:50:34,813] torch.distributed.run: [WARNING] ***************************************** +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Creating extension directory /root/.cache/torch_extensions/py38_cpu/adaptive_cp... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Emitting ninja build file /root/.cache/torch_extensions/py38_cpu/adaptive_cp/build.ninja... +Building extension module adaptive_cp... +Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +[1/2] c++ -MMD -MF adaptive_cp.o.d -DTORCH_EXTENSION_NAME=adaptive_cp -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -I/usr/local/Ascend/ascend-toolkit/latest/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/third_party -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/acl -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/inc -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/TH -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/THC -isystem /root/miniconda3/envs/py38/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++17 -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -fPIC -pie -Wl,--disable-new-dtags,--rpath -s -O2 -c local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/ops/csrc/algorithm/adaptive_cp/adaptive_cp.cpp -o adaptive_cp.o +[2/2] c++ adaptive_cp.o -shared -L/usr/local/Ascend/ascend-toolkit/latest/lib64 -lascendcl -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/lib -ltorch_npu -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/lib -lc10 -ltorch_cpu -ltorch -ltorch_python -o adaptive_cp.so +Loading extension module adaptive_cp... 
+local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... 
+/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?
+ warn(
  [warning printed once per local rank]
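The torchvision warning above only means the binary image extension, which was built against CUDA's libc10_cuda.so, cannot load on this NPU host; image decoding that goes through PIL is unaffected. A small sketch of a PIL-based load that avoids torchvision.io is shown below; the sample path is a placeholder.

    # Hedged sketch: decode images via PIL so the missing torchvision.io extension is irrelevant.
    from PIL import Image
    import torchvision.transforms.functional as F

    def load_image_as_tensor(path):
        with Image.open(path) as img:
            # Returns a float32 CHW tensor in [0, 1].
            return F.to_tensor(img.convert("RGB"))

    # tensor = load_image_as_tensor("sample.jpg")  # placeholder path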
+> compiling dataset index builder ...
+make: Entering directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.214 seconds
+vision_projector_recompute False
  [printed once per rank]
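The "> compiling dataset index builder" block above is Megatron-LM ensuring its C++ dataset index helpers are built before data loading; here make reports they are already up to date. A rough stand-in for that step is sketched below; it is not the exact call site in the training code.

    # Illustrative stand-in for the dataset index builder compilation step.
    import subprocess
    import time

    datasets_dir = "third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets"

    start = time.time()
    print("> compiling dataset index builder ...")
    subprocess.run(["make"], cwd=datasets_dir, check=True)  # no-op when already built
    print(f">>> done with dataset index builder. Compilation time: {time.time() - start:.3f} seconds")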
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
  [layers 1 through 15 are frozen with the same 14 messages and identical shapes; every rank prints this listing, and the per-rank streams interleave in the raw log]
+=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+ + + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+ + + +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+ + + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+ +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. + +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+vision_model_freeze
+[per-parameter freeze messages, printed concurrently by multiple ranks and interleaved in the raw log; consolidated:]
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
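The freeze messages above amount to walking the vision tower's parameters and switching off their gradients. Below is a minimal sketch of such a step, assuming a hypothetical helper named freeze_vision_model; the actual ModelLink/cognitron_vl code that produced these lines may be structured differently.

# Hypothetical sketch, not the framework's actual implementation.
import torch.nn as nn

def freeze_vision_model(model: nn.Module, prefix: str = "external_feature_model.vit") -> None:
    # Disable gradients for every parameter under the vision tower and log it,
    # mirroring the "=> set param <name> <shape> requires grad to False." lines.
    print("vision_model_freeze")
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")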
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
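Given the GPTVLModel layout printed above (a frozen 24-layer InternViT tower feeding a vision projector and a 48-layer language decoder), a quick sanity check after freezing is to count which parameters still require gradients. The helper below is a hypothetical sketch and is not part of the logged run.

# Hypothetical sanity check; `model` is assumed to be a GPTVLModel instance like the one above.
import torch.nn as nn

def summarize_trainable(model: nn.Module) -> None:
    # Tally parameter counts by requires_grad to confirm only the LM side stays trainable.
    frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"frozen params: {frozen:,} | trainable params: {trainable:,}")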
+[duplicate per-rank output: the same "=> set param external_feature_model.vit.… requires grad to False." messages and a second, identical copy of the GPTVLModel architecture printout, emitted by other ranks.]
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+[output from several ranks is interleaved at this point: each rank logs the same per-parameter freeze messages for external_feature_model.vit.decoder.layers.10-23 and prints the same model summary; one copy of each is shown below]
+=> set param external_feature_model.vit.decoder.layers.{10..23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{10..23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
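The shapes in the freeze messages above are per-rank tensor-parallel shards, not the full parameter sizes: ColumnParallelLinear weights (linear_qkv, linear_fc1) are split along the output dimension and RowParallelLinear weights (linear_proj, linear_fc2) along the input dimension. A minimal back-of-the-envelope sketch of that arithmetic follows, assuming a tensor-parallel size of 8 and the unsharded InternViT sizes implied by the log (hidden size 1024, combined QKV output 3 x 1024, MLP hidden 4096); the names and layout here are illustrative, not taken from the training code.

    # Hypothetical check of the sharded shapes printed in the freeze log,
    # assuming tensor-parallel size 8 (not read from the actual config).
    TP = 8
    HIDDEN = 1024        # inferred from the layernorm shapes above
    FFN_HIDDEN = 4096    # inferred from 512 * TP

    # (full weight shape, dimension that gets sharded)
    full_shapes = {
        "self_attention.linear_qkv.weight": ((3 * HIDDEN, HIDDEN), 0),   # ColumnParallelLinear: split dim 0
        "self_attention.linear_proj.weight": ((HIDDEN, HIDDEN), 1),      # RowParallelLinear: split dim 1
        "mlp.linear_fc1.weight": ((FFN_HIDDEN, HIDDEN), 0),              # ColumnParallelLinear
        "mlp.linear_fc2.weight": ((HIDDEN, FFN_HIDDEN), 1),              # RowParallelLinear
    }

    for name, (shape, shard_dim) in full_shapes.items():
        per_rank = tuple(d // TP if i == shard_dim else d for i, d in enumerate(shape))
        print(f"{name}: full {shape} -> per-rank {per_rank}")
    # Prints (384, 1024), (1024, 128), (512, 1024) and (1024, 512),
    # matching the torch.Size values in the messages above.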
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
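Functionally, the vision_model_freeze step that emits these messages is a walk over the vision tower's parameters that clears requires_grad and logs each one; the per-layer messages continue below. A minimal sketch of such a helper, assuming plain PyTorch modules: freeze_module and its exact logging are illustrative guesses at what produces these lines, not the actual ModelLink/Megatron implementation.

    import torch
    from torch import nn

    def freeze_module(module: nn.Module, prefix: str = "external_feature_model.vit") -> None:
        """Hypothetical helper: freeze every parameter of a submodule and log it
        in the same format as the lines in this log."""
        for name, param in module.named_parameters():
            param.requires_grad = False
            print(f"=> set param {prefix}.{name} {param.shape} requires grad to False.")

    # Usage sketch: freeze only the vision tower, leaving the vision projector
    # and the language model trainable.
    # freeze_module(model.external_feature_model.vit)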
+[output from two ranks is interleaved at this point; deduplicated, the freeze messages continue over the lower decoder layers with the same parameter set and shapes as above]
+=> set param external_feature_model.vit.decoder.layers.{0..4}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..4}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. 
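The "=> set param ... requires grad to False." messages are emitted while the vision_model_freeze step walks the InternViT tower (external_feature_model.vit) and disables gradients on every parameter. A minimal sketch of such a freeze-and-log pass, assuming plain PyTorch, is below; freeze_module and its log_prefix argument are illustrative names, not the actual ModelLink/Cognitron-VL helper.

from torch import nn


def freeze_module(module: nn.Module, log_prefix: str = "external_feature_model.vit") -> None:
    """Freeze every parameter of `module` and log it in the format used above.

    Sketch only: the real vision_model_freeze code may differ in structure,
    naming and how it routes its log lines.
    """
    for name, param in module.named_parameters():
        param.requires_grad = False
        print(f"=> set param {log_prefix}.{name} {param.shape} requires grad to False.")


# Hypothetical usage with a stand-in module; in the real run the argument is
# the InternViT model listed in the GPTVLModel dump below.
vit_stub = nn.Sequential(nn.Conv2d(3, 1024, kernel_size=14, stride=14), nn.Linear(1024, 1024))
freeze_module(vit_stub)
assert all(not p.requires_grad for p in vit_stub.parameters())

Note that the printed shapes are the per-rank shards of the tensor-parallel layers, consistent with the tp8 setting in the script name: linear_qkv.weight is [384, 1024] (3 x 1024 / 8 = 384 output rows per rank), linear_proj.weight is [1024, 128] (row-parallel input split 1024 / 8 = 128), and mlp.linear_fc1.weight is [512, 1024] (a 4096-wide MLP hidden split across 8 ranks), while the layer norms and the ls1/ls2 scales keep their full [1024] shape.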
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
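The GPTVLModel dump above shows the InternViT tower alongside the vision_projection MLP, the pre_proj_layernorm and the 48-layer language decoder with its output_layer. As a quick cross-check of which of these top-level submodules actually ended up frozen by the pass above, one can group parameters by the first component of their names; summarize_trainability below is a hypothetical helper sketched for that purpose, not part of the training code.

from collections import defaultdict

from torch import nn


def summarize_trainability(model: nn.Module) -> None:
    """Report trainable vs. frozen parameter counts per top-level submodule
    (e.g. external_feature_model, embedding, decoder, output_layer)."""
    stats = defaultdict(lambda: [0, 0])  # top-level name -> [trainable, frozen]
    for name, param in model.named_parameters():
        top = name.split(".")[0]
        stats[top][0 if param.requires_grad else 1] += param.numel()
    for top, (trainable, frozen) in sorted(stats.items()):
        print(f"{top:<30} trainable={trainable:,} frozen={frozen:,}")


# Usage (model being the GPTVLModel instance whose repr is printed above):
# summarize_trainability(model)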
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
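The messages above come from the vision_model_freeze step: every InternViT parameter has its gradient disabled before stage-4 fine-tuning. A minimal sketch of what such a freeze step typically does, assuming a plain PyTorch module handle; the helper name and the `model` argument are illustrative, not the actual ModelLink/Cognitron-VL code:

    import torch.nn as nn

    def freeze_vision_tower(model: nn.Module, prefix: str = "external_feature_model.vit.") -> None:
        # Illustrative only: walk the named parameters and turn off gradients for the ViT.
        for name, param in model.named_parameters():
            if name.startswith(prefix):
                param.requires_grad = False
                # Matches the log format above, e.g.
                # "=> set param ...linear_qkv.weight torch.Size([384, 1024]) requires grad to False."
                print(f"=> set param {name} {param.shape} requires grad to False.")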
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
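The shard shapes printed during the freeze are consistent with 8-way tensor parallelism: ColumnParallelLinear splits the output dimension and RowParallelLinear splits the input dimension, so with a ViT hidden size of 1024 the shapes follow directly. A small illustrative check; the tensor-parallel degree and the FFN hidden size are inferred from the printed shapes, not read from the run config:

    # Illustrative sanity check of the tensor-parallel shard shapes in the freeze log.
    TP = 8                 # assumed tensor-parallel degree (384 = 3 * 1024 / 8)
    HIDDEN = 1024          # InternViT hidden size, per LayerNorm((1024,), ...)
    FFN_HIDDEN = 512 * TP  # implied by the linear_fc1 shard having 512 output rows

    qkv_shard = (3 * HIDDEN // TP, HIDDEN)   # ColumnParallelLinear splits the output dim
    proj_shard = (HIDDEN, HIDDEN // TP)      # RowParallelLinear splits the input dim
    fc1_shard = (FFN_HIDDEN // TP, HIDDEN)
    fc2_shard = (HIDDEN, FFN_HIDDEN // TP)

    assert qkv_shard == (384, 1024)
    assert proj_shard == (1024, 128)
    assert fc1_shard == (512, 1024)
    assert fc2_shard == (1024, 512)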
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +model GPTVLModel( + (external_feature_model): MegatronVisionModel( + (vit): InternViTModel( + (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) + (position_embeddings): Embedding(1025, 1024) + (decoder): TransformerBlock( + (layers): ModuleList( + (0-23): 24 x InternViTTransformerLayer( + (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + ) + (self_attn_bda): IdentityFuncOp() + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + (mlp_bda): IdentityFuncOp() + ) + ) + ) + ) + (vision_projection): MultimodalProjector( + (encoder): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) + ) + (embedding): LanguageModelEmbedding( + (word_embeddings): VocabParallelEmbedding() + (embedding_dropout): Dropout(p=0.0, inplace=False) + ) + (rotary_pos_emb): RotaryEmbedding() + (decoder): TransformerBlock( + (layers): ModuleList( + (0-47): 48 x TransformerLayer( + (input_layernorm): RMSNorm() + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + (q_layernorm): IdentityOp() + (k_layernorm): IdentityOp() + ) + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): RMSNorm() + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + ) + (final_layernorm): RMSNorm() + ) + (output_layer): ColumnParallelLinear() +) +vision_model_freeze +=> set param 
external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
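The parameter shapes in these freeze messages are per-rank tensor-parallel shards rather than full weights: with the tp8 setting implied by the script name, the InternViT qkv projection (3*1024 x 1024) appears as [384, 1024] on each rank, the row-parallel output projection (1024 x 1024) as [1024, 128], and the 4096-wide MLP as [512, 1024] / [1024, 512], while unsharded tensors (ls1/ls2, layer norms, row-parallel biases) keep their full [1024] size. The GPTVLModel printout above shows where these sit: a frozen 24-layer InternViT tower plus an MLP vision projector feeding a 48-layer language decoder. A minimal sanity-check sketch (hypothetical helper, not part of the ModelLink code) that reproduces the shard shapes, assuming TP=8 and the hidden/FFN sizes read off the log (1024, and 512*8 = 4096):

    # Derive the expected TP=8 shard shapes for the frozen InternViT weights
    # and compare them with the sizes printed in the log above.
    TP = 8
    HIDDEN = 1024          # ViT hidden size, from the [1024] layernorm shapes
    FFN = 4096             # MLP width, inferred from the [512] fc1 shard times TP

    full_shapes = {
        "self_attention.linear_qkv.weight": (3 * HIDDEN, HIDDEN),    # column-parallel
        "self_attention.linear_qkv.bias": (3 * HIDDEN,),             # column-parallel
        "self_attention.linear_proj.weight": (HIDDEN, HIDDEN),       # row-parallel
        "mlp.linear_fc1.weight": (FFN, HIDDEN),                      # column-parallel
        "mlp.linear_fc1.bias": (FFN,),                               # column-parallel
        "mlp.linear_fc2.weight": (HIDDEN, FFN),                      # row-parallel
    }

    def shard_shape(name, shape, tp=TP):
        """Column-parallel layers split dim 0 across ranks; row-parallel layers split the last dim."""
        if "linear_qkv" in name or "linear_fc1" in name:
            return (shape[0] // tp,) + shape[1:]
        return shape[:-1] + (shape[-1] // tp,)

    for name, shape in full_shapes.items():
        print(name, shard_shape(name, shape))
    # -> linear_qkv.weight (384, 1024), linear_qkv.bias (384,),
    #    linear_proj.weight (1024, 128), linear_fc1.weight (512, 1024),
    #    linear_fc2.weight (1024, 512), matching the shards logged above.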
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
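The "vision_model_freeze" blocks above simply walk every parameter of the ViT tower and turn off its gradient. As a minimal sketch of that step (the helper name mirrors the marker printed in the log, and the prefix argument and call site are assumptions; only the requires_grad=False behaviour and the "=> set param ..." print format come from the log itself):

import torch.nn as nn

def vision_model_freeze(model: nn.Module, prefix: str = "external_feature_model.vit") -> None:
    # Freeze every parameter under the vision tower and log it in the
    # "=> set param <name> <shape> requires grad to False." format seen above.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")

Every rank runs the same freeze pass, which is why the identical parameter listing (and the same model summary) repeats throughout this log.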
+vision_model_freeze +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.
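The module tree above, together with the surrounding "=> set param ... requires grad to False." records, shows the effect of the vision_model_freeze step: every parameter under external_feature_model.vit is frozen before training starts. A minimal sketch of that pattern, assuming an ordinary torch.nn.Module with the printed layout (the helper name freeze_vision_model and the exact print format are illustrative, not the ModelLink source):

import torch

def freeze_vision_model(model: torch.nn.Module) -> None:
    """Freeze every ViT parameter; the projector and language model stay trainable."""
    vit = model.external_feature_model.vit
    for name, param in vit.named_parameters():
        param.requires_grad = False
        # param.shape prints as torch.Size([...]), matching the log records above.
        print(f"=> set param external_feature_model.vit.{name} {param.shape} requires grad to False.")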
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +model GPTVLModel( + (external_feature_model): MegatronVisionModel( + (vit): InternViTModel( + (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) + (position_embeddings): Embedding(1025, 1024) + (decoder): TransformerBlock( + (layers): ModuleList( + (0-23): 24 x InternViTTransformerLayer( + (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + ) + (self_attn_bda): IdentityFuncOp() + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + (mlp_bda): IdentityFuncOp() + ) + ) + ) + ) + (vision_projection): MultimodalProjector( + (encoder): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) + ) + (embedding): LanguageModelEmbedding( + (word_embeddings): VocabParallelEmbedding() + (embedding_dropout): Dropout(p=0.0, inplace=False) + ) + (rotary_pos_emb): RotaryEmbedding() + (decoder): TransformerBlock( + (layers): ModuleList( + (0-47): 48 x TransformerLayer( + (input_layernorm): RMSNorm() + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + (q_layernorm): IdentityOp() + (k_layernorm): IdentityOp() + ) + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): RMSNorm() + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + ) + (final_layernorm): RMSNorm() + ) + (output_layer): ColumnParallelLinear() +) +vision_model_freeze +=> set param 
external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +model GPTVLModel( + (external_feature_model): MegatronVisionModel( + (vit): InternViTModel( + (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) + (position_embeddings): Embedding(1025, 1024) + (decoder): TransformerBlock( + (layers): ModuleList( + (0-23): 24 x InternViTTransformerLayer( + (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + ) + (self_attn_bda): IdentityFuncOp() + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + (mlp_bda): IdentityFuncOp() + ) + ) + ) + ) + (vision_projection): MultimodalProjector( + (encoder): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) + ) + (embedding): LanguageModelEmbedding( + (word_embeddings): VocabParallelEmbedding() + (embedding_dropout): Dropout(p=0.0, inplace=False) + ) + (rotary_pos_emb): RotaryEmbedding() + (decoder): TransformerBlock( + (layers): ModuleList( + (0-47): 48 x TransformerLayer( + (input_layernorm): RMSNorm() + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + (q_layernorm): IdentityOp() + (k_layernorm): IdentityOp() + ) + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): RMSNorm() + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + ) + (final_layernorm): RMSNorm() + ) + (output_layer): ColumnParallelLinear() +) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
[_get_param_groups output continues in the same pattern for the remaining decoder layers and the vision_projection / pre_proj_layernorm / word_embeddings parameters: linear and embedding weights are keyed (1.0, 1.0, False, False), while layernorm weights, layernorm biases and linear_qkv biases are keyed (0.0, 1.0, False, False). The identical per-parameter listing printed by the other ranks on this node is interleaved with it in the raw log.]
False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, 
False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key 
(0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) 
+ + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight 
key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + 
+_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name 
module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias 
key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+[_get_param_groups: interleaved per-rank output, one key tuple per parameter; deduplicated listing below]
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight 
key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key 
(0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key 
(0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name 
module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, 
False, False)
[_get_param_groups output, interleaved because every rank writes to the same log; deduplicated, the unique entries (one per parameter) are:]
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 
1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key 
(1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + + + + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + 
+_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
+[_get_param_groups: per-parameter key assignment, printed once per rank; the interleaved per-rank output reduces to the unique entries below]
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + 
+_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, 
False) + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias 
key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias 
_get_param_groups parameter-group keys, decoder layers 17-47 plus final_layernorm and output_layer (the raw output below was duplicated and interleaved across the local ranks; one entry is kept per parameter, with {17..47} standing for the per-layer repeats):
_get_param_groups name module.module.decoder.layers.{17..47}.input_layernorm.weight key (0.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.{17..47}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.{17..47}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.{17..47}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.{17..47}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.{17..47}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.{17..47}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 
1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, 
False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
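The `_get_param_groups name ... key (wd_mult, lr_mult, ...)` lines above show each trainable tensor being bucketed by a weight-decay / learning-rate key: biases and layernorm weights are logged with wd_mult 0.0, all other weights with 1.0, and the lr multiplier is uniformly 1.0. Below is a minimal sketch of that bucketing, assuming Megatron-LM-style parameter grouping; the function names and the interpretation of the last two key fields are assumptions, not the actual ModelLink code.

    # Minimal sketch (not the real _get_param_groups): reproduce the bucketing
    # suggested by the log lines above. The last two key fields are treated as
    # expert-parallel / decoupled-lr flags and left False, as an assumption.
    from collections import defaultdict

    import torch.nn as nn


    def param_group_key(name: str) -> tuple:
        # Biases and norm weights are logged with wd_mult 0.0, everything else 1.0.
        no_wd = name.endswith(".bias") or "layernorm" in name
        wd_mult = 0.0 if no_wd else 1.0
        lr_mult = 1.0  # the log shows a uniform lr multiplier
        return (wd_mult, lr_mult, False, False)


    def get_param_groups(model: nn.Module):
        buckets = defaultdict(list)
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            key = param_group_key(name)
            print("_get_param_groups name", name, "key", key)
            buckets[key].append(param)
        return [
            {"params": params, "wd_mult": key[0], "lr_mult": key[1]}
            for key, params in buckets.items()
        ]

An optimizer built from these groups would then scale its base weight decay and learning rate by wd_mult and lr_mult per group, which is why norm weights and biases end up with no weight decay.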
+_load_base_checkpoint iteration 1000
+_load_base_checkpoint release False
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_04/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_07/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_05/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_06/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_02/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_00/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_03/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_01/model_optim_rng.pt
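Each tensor-parallel rank resolves its own shard of the stage-3 checkpoint under `iter_0001000/mp_rank_0X/model_optim_rng.pt`. The helper below is a sketch of that path layout, assuming the usual Megatron-style checkpoint naming; `get_checkpoint_path` is an illustrative name, not the framework's API.

    # Sketch of the checkpoint layout implied by the paths above:
    #   <load_dir>/iter_<7-digit iteration>/mp_rank_<2-digit tp rank>/model_optim_rng.pt
    # Assumption: Megatron-style naming, where "release" checkpoints live in a
    # directory literally called "release" instead of iter_XXXXXXX.
    import os
    from typing import Optional


    def get_checkpoint_path(load_dir: str, iteration: int, tp_rank: int,
                            release: bool = False,
                            pp_rank: Optional[int] = None) -> str:
        step_dir = "release" if release else f"iter_{iteration:07d}"
        if pp_rank is None:  # pipeline-parallel size 1, as in this run (pp1)
            rank_dir = f"mp_rank_{tp_rank:02d}"
        else:                # assumed pp > 1 form: mp_rank_<tp>_<pp>
            rank_dir = f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"
        return os.path.join(load_dir, step_dir, rank_dir, "model_optim_rng.pt")


    # get_checkpoint_path(CKPT_LOAD_DIR, 1000, 4) reproduces the mp_rank_04 path above.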
+load_checkpoint iteration 0
+load_checkpoint release False
+strict True
+> rank 492 does not create GPT datasets ...
+> rank 495 does not create GPT datasets ...
+> rank 484 does not create GPT datasets ...
+> rank 494 does not create GPT datasets ...
+> rank 493 does not create GPT datasets ...
+> rank 485 does not create GPT datasets ...
+> rank 480 is creating GPT datasets ...
+> rank 487 does not create GPT datasets ...
+> rank 489 does not create GPT datasets ...
+> rank 491 does not create GPT datasets ...
+> rank 490 does not create GPT datasets ...
+> rank 488 is creating GPT datasets ...
+> rank 482 does not create GPT datasets ...
+> rank 483 does not create GPT datasets ...
+> rank 481 does not create GPT datasets ...
+> rank 486 does not create GPT datasets ...
+target_ratios [(1, 1), (1, 2), (2, 1), (3, 1), (1, 3), (2, 2), (4, 1), (1, 4), (5, 1), (1, 5), (1, 6), (6, 1), (3, 2), (2, 3), (7, 1), (1, 7), (4, 2), (2, 4), (1, 8), (8, 1), (1, 9), (3, 3), (9, 1), (2, 5), (5, 2), (10, 1), (1, 10), (11, 1), (1, 11), (12, 1), (3, 4), (4, 3), (1, 12), (6, 2), (2, 6)]
+possible_resolutions [[448, 448], [448, 896], [896, 448], [1344, 448], [448, 1344], [896, 896], [1792, 448], [448, 1792], [2240, 448], [448, 2240], [448, 2688], [2688, 448], [1344, 896], [896, 1344], [3136, 448], [448, 3136], [1792, 896], [896, 1792], [448, 3584], [3584, 448], [448, 4032], [1344, 1344], [4032, 448], [896, 2240], [2240, 896], [4480, 448], [448, 4480], [4928, 448], [448, 4928], [5376, 448], [1344, 1792], [1792, 1344], [448, 5376], [2688, 896], [896, 2688]]
+[h264 @ 0x555a067f2680] mmco: unref short failure
+[h264 @ 0x5649f704ae40] mmco: unref short failure
+[h264 @ 0x555a067f2680] mmco: unref short failure
+[h264 @ 0x555a067f2680] mmco: unref short failure
+[h264 @ 0x5649f704ae40] mmco: unref short failure
+[h264 @ 0x5649f704ae40] mmco: unref short failure
+[h264 @ 0x555a067f2680] mmco: unref short failure
+[h264 @ 0x555a067f2680] mmco: unref short failure
+[h264 @ 0x5649f704ae40] mmco: unref short failure
+[h264 @ 0x5649f704ae40] mmco: unref short failure
+[h264 @ 0x5559ffcdde40] mmco: unref short failure
+[h264 @ 0x5649f4b2ce40] mmco: unref short failure
+[h264 @ 0x555a045943c0] mmco: unref short failure
+[h264 @ 0x5649ef12bfc0] mmco: unref short failure
+[h264 @ 0x5649ef12bfc0] mmco: unref short failure
+[h264 @ 0x555a05ca29c0] mmco: unref short failure
+[h264 @ 0x5649ef12bfc0] mmco: unref short failure
+[h264 @ 0x5649ef12bfc0] mmco: unref short failure
+[h264 @ 0x555a05fa7000] mmco: unref short failure
+[h264 @ 0x555a05fa7000] mmco: unref short failure
+[h264 @ 0x5649efd953c0] mmco: unref short failure
+[h264 @ 0x5559fd1f5980] mmco: unref short failure
+[h264 @ 0x5649eeb19ec0] mmco: unref short failure
+[h264 @ 0x5559fe26cd00] mmco: unref short failure
+[h264 @ 0x555a067f2680] mmco: unref short failure
+[h264 @ 0x5649f391fe80] mmco: unref short failure
+[h264 @ 0x5559ff8e7600] mmco: unref short failure
+[h264 @ 0x5649f1a0ff00] mmco: unref short failure
+[h264 @ 0x5649f2ce66c0] mmco: unref
short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 100 unjoint_samples 100 joint_samples 0 [136301, 185903] +processed_samples 100 unjoint_samples 100 joint_samples 0 [136301, 185903] +processed_samples 100 unjoint_samples 100 joint_samples 0 [161329, 159174] +processed_samples 100 unjoint_samples 100 joint_samples 0 [135670, 136846] +processed_samples 100 unjoint_samples 100 joint_samples 0 [230777, 221579] +processed_samples 100 unjoint_samples 100 joint_samples 0 [230777, 221579] +processed_samples 100 unjoint_samples 100 joint_samples 0 [161329, 159174] +processed_samples 100 unjoint_samples 100 joint_samples 0 [135670, 136846] +processed_samples 100 unjoint_samples 100 joint_samples 0 [185666, 185971] +processed_samples 100 unjoint_samples 100 joint_samples 0 [185666, 185971] +processed_samples 100 unjoint_samples 100 joint_samples 0 [136013, 137062] +processed_samples 100 unjoint_samples 100 joint_samples 0 [136013, 137062] +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +processed_samples 100 unjoint_samples 100 joint_samples 0 [144154, 142029] +processed_samples 100 unjoint_samples 100 joint_samples 0 [144154, 142029] +processed_samples 100 unjoint_samples 100 joint_samples 0 [142372, 140436] +processed_samples 100 unjoint_samples 100 joint_samples 0 [142372, 140436] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a0030ad00] 
mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ef422040] mmco: unref short failure +[h264 @ 0x5649ef422040] mmco: unref short failure +[h264 @ 0x555a00355040] mmco: unref short failure +[h264 @ 0x555a00355040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +processed_samples 200 unjoint_samples 200 joint_samples 0 [442317, 476094] +processed_samples 200 unjoint_samples 200 joint_samples 0 [442317, 476094] +processed_samples 200 unjoint_samples 200 joint_samples 0 [304017, 303803] +processed_samples 200 unjoint_samples 200 joint_samples 0 [308595, 302406] +processed_samples 200 unjoint_samples 200 joint_samples 0 [304017, 303803] +processed_samples 200 unjoint_samples 200 joint_samples 0 [308595, 302406] +processed_samples 200 unjoint_samples 200 joint_samples 0 [301352, 305263] +processed_samples 200 unjoint_samples 200 joint_samples 0 [301352, 305263] +processed_samples 200 unjoint_samples 200 joint_samples 0 [317896, 339618] +processed_samples 200 unjoint_samples 200 joint_samples 0 [394104, 382765] +processed_samples 200 unjoint_samples 200 joint_samples 0 [394104, 382765] +processed_samples 200 unjoint_samples 200 joint_samples 0 [317896, 339618] 
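The `target_ratios` / `possible_resolutions` pair printed a little further up, just before the first batch of decoder warnings, enumerates tile grids of a 448-pixel base tile with between 1 and 12 tiles per image. The sketch below reproduces both lists under that assumption (InternVL-style dynamic tiling); grids that share the same tile count may print in a slightly different order than above, since the within-tie ordering depends on set iteration order.

    # Sketch reproducing the target_ratios / possible_resolutions lists above.
    # Assumption: InternVL-style dynamic tiling, 448 px base tile, at most 12 tiles.
    TILE = 448
    MIN_TILES, MAX_TILES = 1, 12

    target_ratios = sorted(
        {
            (w, h)
            for n in range(MIN_TILES, MAX_TILES + 1)
            for w in range(1, n + 1)
            for h in range(1, n + 1)
            if MIN_TILES <= w * h <= MAX_TILES
        },
        key=lambda wh: wh[0] * wh[1],
    )
    possible_resolutions = [[TILE * w, TILE * h] for w, h in target_ratios]

    print("target_ratios", target_ratios)
    print("possible_resolutions", possible_resolutions)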
+processed_samples 200 unjoint_samples 200 joint_samples 0 [276598, 277077] +processed_samples 200 unjoint_samples 200 joint_samples 0 [276598, 277077] +processed_samples 200 unjoint_samples 200 joint_samples 0 [361042, 360328] +processed_samples 200 unjoint_samples 200 joint_samples 0 [361042, 360328] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref 
short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +processed_samples 300 unjoint_samples 300 joint_samples 0 [492229, 488923] +processed_samples 300 unjoint_samples 300 joint_samples 0 [492229, 488923] +processed_samples 300 unjoint_samples 300 joint_samples 0 [477837, 477325] +processed_samples 300 unjoint_samples 300 joint_samples 0 [477837, 477325] +processed_samples 300 unjoint_samples 300 joint_samples 0 [590298, 590047] +processed_samples 300 unjoint_samples 300 joint_samples 0 [590298, 590047] +processed_samples 300 unjoint_samples 300 joint_samples 0 [500107, 497881] +processed_samples 300 unjoint_samples 300 joint_samples 0 [500107, 497881] +processed_samples 300 unjoint_samples 300 joint_samples 0 [400463, 400576] +processed_samples 300 unjoint_samples 300 joint_samples 0 [400463, 400576] +processed_samples 300 unjoint_samples 300 joint_samples 0 [604230, 621018] +processed_samples 300 unjoint_samples 300 joint_samples 0 [604230, 621018] +processed_samples 300 unjoint_samples 300 joint_samples 0 [511598, 511244] +processed_samples 300 unjoint_samples 300 joint_samples 0 [511598, 511244] +processed_samples 300 unjoint_samples 300 joint_samples 0 [513168, 514079] +processed_samples 300 unjoint_samples 300 joint_samples 0 [513168, 514079] +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fffa9d40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] 
mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x555a012d1080] mmco: unref short failure +[h264 @ 0x555a012d1080] mmco: unref short failure +[h264 @ 0x555a04eb1380] mmco: unref short failure +[h264 @ 0x555a04eb1380] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 400 unjoint_samples 400 joint_samples 0 [645141, 644007]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [672742, 688581]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [682733, 684223]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [629361, 635981]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [773371, 774327]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [707094, 705592]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [821555, 816781]
+processed_samples 400 unjoint_samples 400 joint_samples 0 [658632, 655574]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 500 unjoint_samples 500 joint_samples 0 [841955, 842678]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [983147, 985847]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [881560, 881559]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [894887, 912251]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [853383, 835984]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [920412, 921212]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [811098, 880230]
+processed_samples 500 unjoint_samples 500 joint_samples 0 [830129, 831340]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 600 unjoint_samples 600 joint_samples 1 [1046784, 140984]
+processed_samples 600 unjoint_samples 600 joint_samples 1 [992692, 253508]
+processed_samples 600 unjoint_samples 600 joint_samples 1 [329003, 1016149]
+processed_samples 600 unjoint_samples 600 joint_samples 0 [935448, 933940]
+processed_samples 600 unjoint_samples 600 joint_samples 0 [1023421, 1025360]
+processed_samples 600 unjoint_samples 600 joint_samples 0 [999002, 1001861]
+processed_samples 600 unjoint_samples 600 joint_samples 0 [968364, 969654]
+processed_samples 600 unjoint_samples 600 joint_samples 0 [1028346, 1017978]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 700 unjoint_samples 700 joint_samples 1 [252030, 979033]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [115459, 1044928]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [332780, 1018922]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [344433, 1036861]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [191911, 1045441]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [643940, 1016149]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [1046784, 453302]
+processed_samples 700 unjoint_samples 700 joint_samples 1 [992692, 517451]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 800 unjoint_samples 800 joint_samples 1 [600026, 1018922]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [480367, 1044928]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [1046784, 733804]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [443725, 1045441]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [730050, 979033]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [992692, 843491]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [980254, 1016149]
+processed_samples 800 unjoint_samples 800 joint_samples 1 [721471, 1036861]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 900 unjoint_samples 900 joint_samples 1 [685603, 1044928]
+processed_samples 900 unjoint_samples 900 joint_samples 1 [870427, 1018922]
+processed_samples 900 unjoint_samples 900 joint_samples 2 [1035512, 215070]
+processed_samples 900 unjoint_samples 900 joint_samples 2 [169160, 1045271]
+processed_samples 900 unjoint_samples 900 joint_samples 2 [44304, 1042650]
+processed_samples 900 unjoint_samples 900 joint_samples 1 [1009966, 1036861]
+processed_samples 900 unjoint_samples 900 joint_samples 1 [769375, 1045441]
+processed_samples 900 unjoint_samples 900 joint_samples 1 [1002137, 1001356]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1045990, 46173]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1029364, 115962]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1047713, 111784]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [84176, 1047539]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [389118, 1042650]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1035512, 460681]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [542798, 1045271]
+processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1036356, 343956]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1047713, 390717]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1045990, 377675]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1029364, 428331]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [379155, 1047539]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [937076, 1045271]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [737670, 1042650]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1036356, 690281]
+processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1035512, 745640]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1029364, 716107]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1045990, 736951]
+processed_samples 1200 unjoint_samples 1200 joint_samples 3 [178926, 1047160]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [963999, 1042650]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1043823, 1042971]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1047713, 764007]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1036356, 1004222]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [660021, 1047539]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1046489, 7826]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1045990, 1007691]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [481372, 1047160]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1047713, 994410]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [4593, 1047539]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1045557, 290992]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [275734, 1043972]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1048104, 255593]
+[h264] mmco: unref short failure (warning repeated across multiple h264 decoder contexts)
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [176477, 1047743]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1046489, 339204]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [232404, 1047554]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [329617, 1047539]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [612513, 1043972]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1045557, 589201]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1048104, 574654]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [817074, 1047160]
unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 1500 unjoint_samples 1500 joint_samples 4 [15562, 1047160] +processed_samples 1500 unjoint_samples 1500 joint_samples 4 [15562, 1047160] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [616066, 1047554] +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1046489, 623184] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1046489, 623184] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [616066, 1047554] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1048104, 907324] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1045557, 935157] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1048104, 907324] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [624037, 1047539] +[h264 @ 0x5559fd15f840] mmco: unref short failure +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [908565, 1043972] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [624037, 1047539] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1045557, 935157] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [908565, 1043972] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [602378, 1047743] +processed_samples 1500 unjoint_samples 1500 joint_samples 3 [602378, 1047743] +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] 
mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649eec6dac0] mmco: unref short failure +[h264 @ 0x5649eec6dac0] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1046489, 946723] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [114808, 1047945] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [114808, 1047945] +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1046489, 946723] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [286993, 1047160] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [286993, 1047160] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [155550, 1042992] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [285396, 1047119] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [155550, 1042992] +processed_samples 1600 unjoint_samples 1600 joint_samples 4 [285396, 1047119] +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1010244, 1047539] +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1010244, 1047539] +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [927098, 1047743] +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [927098, 1047743] +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [917247, 1047554] +processed_samples 1600 unjoint_samples 1600 joint_samples 3 [917247, 1047554] +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 
0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a0116d6c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a0116d6c0] mmco: unref short failure +[h264 @ 0x555a0116d6c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short 
failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [152708, 1047743] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [152708, 1047743] +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1046327, 286245] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1046327, 286245] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [227378, 1046063] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [227378, 1046063] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [369421, 1047945] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [369421, 1047945] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1042972, 142693] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1042972, 142693] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [570080, 1047160] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [570080, 1047160] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [435158, 1042992] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [435158, 1042992] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [644299, 1047119] +processed_samples 1700 unjoint_samples 1700 joint_samples 4 [644299, 1047119] +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref 
short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 
0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [577867, 1046063] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [577867, 1046063] +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1042972, 527675] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1042972, 527675] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [429748, 1047743] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [429748, 1047743] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [708231, 1042992] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [708231, 1042992] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1046327, 547165] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [896495, 1047160] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1046327, 547165] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [896495, 1047160] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [627333, 1047945] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [627333, 1047945] +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [871854, 1047119] +processed_samples 1800 unjoint_samples 1800 joint_samples 4 [871854, 1047119] +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure 
+[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [743031, 1047743] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1042972, 818526] +processed_samples 1900 unjoint_samples 1900 joint_samples 5 [273341, 1047160] +processed_samples 1900 unjoint_samples 1900 joint_samples 5 [105106, 1047119] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1042972, 818526] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [743031, 1047743] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [862930, 1046063] +processed_samples 1900 unjoint_samples 1900 joint_samples 5 [273341, 1047160] +processed_samples 1900 unjoint_samples 1900 joint_samples 5 [105106, 1047119] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [862930, 1046063] +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1046327, 1006557] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1031627, 1047945] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure 
+[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1039483, 1042992] +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1046327, 1006557] +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1031627, 1047945] +[h264 @ 0x555a01d6bb40] mmco: unref short failure +[h264 @ 0x555a01d6bb40] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1039483, 1042992] +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 
0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1025880, 172231] +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1045633, 404501] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1025880, 172231] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [614626, 1047160] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1047005, 247576] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1046427, 219041] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [82963, 1044981] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [389277, 1047119] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1045633, 404501] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [614626, 1047160] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1047005, 247576] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1046427, 219041] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [82963, 1044981] +processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1025173, 1047743] +processed_samples 2000 unjoint_samples 2000 joint_samples 5 [389277, 1047119] +processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1025173, 1047743] +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure 
+[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2113b40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: 
unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1025880, 494031] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1025880, 494031] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [417457, 1044981] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [417457, 1044981] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1029293, 416295] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1029293, 416295] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1046427, 573043] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1046427, 573043] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1047005, 733854] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1047005, 733854] +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1045633, 793613] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1045633, 793613] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [924889, 1047160] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [924889, 1047160] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [678301, 1047119] +processed_samples 2100 unjoint_samples 2100 joint_samples 5 [678301, 1047119] +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x555a020decc0] mmco: unref short failure +[h264 @ 0x555a020decc0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a067f2680] 
mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f2113b40] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +processed_samples 2200 unjoint_samples 2200 joint_samples 6 [64780, 1022454] +processed_samples 2200 
unjoint_samples 2200 joint_samples 6 [64780, 1022454] +processed_samples 2200 unjoint_samples 2200 joint_samples 6 [153094, 1047160] +processed_samples 2200 unjoint_samples 2200 joint_samples 6 [153094, 1047160] +processed_samples 2200 unjoint_samples 2200 joint_samples 6 [17659, 1045609] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1029293, 760834] +processed_samples 2200 unjoint_samples 2200 joint_samples 6 [17659, 1045609] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1029293, 760834] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [680997, 1044981] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1025880, 953414] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [680997, 1044981] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1025880, 953414] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1025348, 1047119] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1046427, 997952] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1025348, 1047119] +processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1046427, 997952] +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649e36e55c0] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short 
failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [168460, 1021869] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [168460, 1021869] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [299051, 1046402] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [299051, 1046402] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [328027, 1045609] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [328027, 1045609] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [384015, 1022454] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [384015, 1022454] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1046773, 296268] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1046773, 296268] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [418718, 1047160] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [418718, 1047160] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [997435, 1044981] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [997435, 1044981] +[h264 @ 0x555a01468bc0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1029293, 999533] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1029293, 999533] +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref 
short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 2400 unjoint_samples 2400 joint_samples 6 [214219, 1046833] +processed_samples 2400 unjoint_samples 2400 joint_samples 6 [531637, 1021869] +processed_samples 2400 unjoint_samples 2400 joint_samples 6 [531637, 1021869] +processed_samples 2400 unjoint_samples 2400 joint_samples 6 [214219, 1046833] +processed_samples 2400 unjoint_samples 2400 joint_samples 6 
[1041184, 189346]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [662814, 1045609]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [562488, 1046402]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1046773, 620573]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [779201, 1047160]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [717427, 1022454]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [963034, 1045609]
+processed_samples 2500 unjoint_samples 2500 joint_samples 7 [1040035, 152091]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1041184, 484451]
+processed_samples 2500 unjoint_samples 2500 joint_samples 7 [1037182, 27542]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [653431, 1046833]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [780576, 1021869]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [898543, 1046402]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046773, 1005163]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1030392, 45401]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1037182, 451076]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1041184, 749174]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1033539, 202907]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1047495, 231869]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [218226, 1046760]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1040035, 418957]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [920232, 1046833]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1030392, 346973]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [182480, 1023531]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [181982, 1046833]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [501986, 1046760]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047495, 577059]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1037182, 785261]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1033539, 488905]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1040035, 635866]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [458308, 1046833]
+processed_samples 2800 unjoint_samples 2800 joint_samples 8 [10513, 1046053]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1040035, 936194]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047495, 905613]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1030392, 694809]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [762415, 1046760]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [542048, 1023531]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1033539, 903348]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [183546, 1039187]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [912879, 1023531]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [192338, 1036659]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1004522, 1046760]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [159690, 1030365]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1030392, 1025591]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [266009, 1046053]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [820339, 1046833]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1043814, 238270]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [209876, 1047479]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1038574, 108667]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [467679, 1039187]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [442152, 1030365]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1032263, 156247]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [562463, 1046053]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [579221, 1036659]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1038574, 509673]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [470770, 1047479]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1032263, 503308]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1043814, 597379]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [825304, 1039187]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [894532, 1036659]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [893361, 1046053]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [780811, 1030365]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [1046643, 153549]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1043814, 892001]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1032263, 770555]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [1033502, 182363]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [154998, 1032569]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [1047130, 373515]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1038574, 944139]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [851493, 1047479]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1046506, 84077]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1044719, 190027]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1046590, 224059]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1033502, 425149]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1046643, 487770]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [400124, 1032569]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1044563, 1042250]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1047130, 726736]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1033502, 756251]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1046590, 456265]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1046506, 428089]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1044719, 601555]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1046742, 222638]
+processed_samples 3400 unjoint_samples 3400 joint_samples 10 [655391, 993746]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1046643, 969469]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [679497, 1032569]
+processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1033502, 1044301]
+processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1046590, 765565]
+processed_samples 3500 unjoint_samples 3500 joint_samples 10 [223735, 1047330]
+processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1046742, 545020]
+processed_samples 3500 unjoint_samples 3500 joint_samples 10 [1047244, 10668]
+processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1046506, 752241]
+processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1034621, 1034829]
+processed_samples 3500 unjoint_samples 3500 joint_samples 10 [933465, 993746]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1046590, 15967]
+processed_samples 3600 unjoint_samples 3600 joint_samples 11 [1037619, 181752]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [299263, 1045384]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1046742, 769797]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1044335, 332165]
+processed_samples 3600 unjoint_samples 
3600 joint_samples 10 [1044335, 332165] +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1046506, 1042136] +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1046506, 1042136] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047244, 305151] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047244, 305151] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [565551, 1047330] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [565551, 1047330] +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649ef3024c0] mmco: unref short failure +[h264 @ 0x5649ef3024c0] 
mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1046742, 1013845] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [216677, 1044651] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046590, 334641] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047244, 607285] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1044335, 574437] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1011196, 1047330] +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [608115, 1045384] +processed_samples 3700 unjoint_samples 3700 joint_samples 11 [1037619, 632873] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [216677, 1044651] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [608115, 1045384] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046590, 334641] +processed_samples 3700 unjoint_samples 3700 joint_samples 11 [1037619, 632873] +processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1046742, 1013845] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1044335, 574437] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 
[1047244, 607285] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1011196, 1047330] +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a008ecd80] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2666900] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref 
short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046590, 621579] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1044335, 832002] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046790, 390463] +processed_samples 3800 unjoint_samples 3800 joint_samples 11 [242157, 1047330] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047244, 929345] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [606617, 1044651] +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [910276, 1045384] +[h264 @ 0x555a06786a80] mmco: unref short failure +processed_samples 3800 unjoint_samples 3800 joint_samples 11 [1037619, 908765] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047244, 929345] +processed_samples 3800 unjoint_samples 3800 joint_samples 11 [242157, 1047330] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [606617, 1044651] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1044335, 832002] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046790, 390463] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046590, 621579] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [910276, 1045384] +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +processed_samples 3800 unjoint_samples 3800 joint_samples 11 [1037619, 908765] +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 
0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [244902, 1046490] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [805089, 1044651] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1046790, 707809] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1045136, 195057] +processed_samples 3900 unjoint_samples 3900 joint_samples 12 [1042702, 181960] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [244902, 1046490] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [153527, 1039099] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [153527, 1039099] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1046790, 707809] +processed_samples 3900 unjoint_samples 3900 joint_samples 12 [1042702, 181960] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [500248, 1047330] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1045136, 195057] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1046590, 967625] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [805089, 1044651] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [500248, 1047330] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1046590, 967625] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref 
short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1047088, 6440] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1047088, 6440] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [830368, 1047330] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [830368, 1047330] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [46408, 1037817] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [46408, 1037817] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1045136, 461770] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1045136, 461770] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [563123, 1046490] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [458049, 1039099] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [563123, 1046490] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [458049, 1039099] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1046590, 171890] +processed_samples 4000 unjoint_samples 4000 joint_samples 12 [1042702, 496263] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1046590, 171890] +processed_samples 4000 unjoint_samples 4000 joint_samples 12 [1042702, 496263] +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 
0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649eec6dac0] mmco: unref short failure +[h264 @ 0x5649eec6dac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1046590, 563874] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [409121, 1037817] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1047088, 285789] +[h264 @ 0x5649ef013980] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 12 [993319, 204450] +[h264 @ 0x555a0105a980] mmco: 
unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [824691, 1046490] +processed_samples 4100 unjoint_samples 4100 joint_samples 12 [1042702, 905179] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1045136, 727958] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [777555, 1039099] +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1046590, 563874] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1047088, 285789] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [409121, 1037817] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [777555, 1039099] +processed_samples 4100 unjoint_samples 4100 joint_samples 12 [993319, 204450] +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [824691, 1046490] +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1045136, 727958] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 12 [1042702, 905179] +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a008ecd80] mmco: unref short failure +[h264 @ 0x555a008ecd80] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5559fddcae00] 
mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] [h264 @ 0x5559ffcdde40] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x5559ffcdde40] [h264 @ 0x5649ef22d0c0] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1047088, 636316] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1047088, 636316] +processed_samples 4200 unjoint_samples 4200 joint_samples 13 [1046693, 107270] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [83138, 1010857] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [83138, 1010857] +processed_samples 4200 unjoint_samples 4200 joint_samples 13 [1046693, 107270] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [981983, 307130] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [981983, 307130] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [37300, 1046490] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [37300, 1046490] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1046590, 865156] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1046590, 865156] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [993319, 475002] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [993319, 475002] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [734402, 1037817] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [734402, 1037817] +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 
0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a00b9b540] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a00b9b540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [981983, 615910] +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1047088, 890480] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [102485, 1046252] +processed_samples 4300 unjoint_samples 4300 joint_samples 13 [1046693, 415812] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [406857, 1046490] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [412630, 1010857] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [993319, 788105] +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short 
failure +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1047088, 890480] +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1011278, 1037817] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [981983, 615910] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [102485, 1046252] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [406857, 1046490] +processed_samples 4300 unjoint_samples 4300 joint_samples 13 [1046693, 415812] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [412630, 1010857] +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [993319, 788105] +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1011278, 1037817] +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 
0x555a00252580] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649f00553c0] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x555a01468bc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047088, 109545] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047088, 109545] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [520419, 1046252] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [520419, 1046252] +processed_samples 4400 unjoint_samples 4400 joint_samples 13 [1042102, 33067] +processed_samples 4400 unjoint_samples 4400 joint_samples 13 [1042102, 33067] +processed_samples 4400 unjoint_samples 4400 joint_samples 13 [1046693, 688866] +processed_samples 4400 unjoint_samples 4400 joint_samples 13 [1046693, 688866] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 
[1041692, 359584] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1041692, 359584] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [768511, 1046490] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [768511, 1046490] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [981983, 886177] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [981983, 886177] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [743694, 1010857] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [743694, 1010857] +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 
0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1036673, 41529] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1036673, 41529] +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047088, 454637] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047088, 454637] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [10701, 1046490] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [10701, 1046490] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1041692, 612687] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1042102, 270411] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1042102, 270411] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [834087, 1046252] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1041692, 612687] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1038438, 1038449] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [834087, 1046252] +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1046693, 1013490] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1046693, 1013490] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1038438, 1038449] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref 
short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01603b40] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1036673, 
353785] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1047088, 687956] +processed_samples 4600 unjoint_samples 4600 joint_samples 14 [311681, 1031759] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1042766, 104271] +[h264 @ 0x5649f2f12340] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [360930, 1045107] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1042102, 534994] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1041692, 896915] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [257614, 1046490] +[h264 @ 0x5559ff97fb80] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1036673, 353785] +[h264 @ 0x5649eef38580] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1042766, 104271] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 14 [311681, 1031759] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1047088, 687956] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [360930, 1045107] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [257614, 1046490] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1042102, 534994] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1041692, 896915] +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f2113b40] mmco: unref short failure +[h264 @ 0x5649f2113b40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure 
+[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649eee38a00] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [62227, 1003115] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [62227, 1003115] +processed_samples 4700 unjoint_samples 4700 joint_samples 14 [612607, 1031759] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [157102, 1036574] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1042766, 381816] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [157102, 1036574] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1036673, 732799] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1042766, 381816] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [576384, 1045107] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1036673, 732799] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [576384, 1045107] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [515158, 1046490] +processed_samples 4700 unjoint_samples 4700 joint_samples 14 [612607, 1031759] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [515158, 1046490] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1042102, 837593] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1042102, 837593] +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: 
unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5559f2cba640] mmco: unref short failure +[h264 @ 0x5559f2cba640] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 
0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [842164, 1046490] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [409213, 1003115] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [374110, 1036574] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [126876, 1033289] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [947205, 1031759] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1042766, 729396] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1036673, 997318] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [915955, 1045107] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [842164, 1046490] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [126876, 1033289] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1042766, 729396] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [374110, 1036574] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [409213, 1003115] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1036673, 997318] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [915955, 1045107] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [947205, 1031759] +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref 
short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2113b40] mmco: unref short failure +[h264 @ 0x5649f2113b40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [59990, 1047184] +processed_samples 4900 unjoint_samples 4900 joint_samples 15 [187164, 1039417] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [387453, 1033289] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [613951, 1036574] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1036673, 308324] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [258805, 1047319] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1042766, 1022708] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [682640, 1003115] +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [59990, 1047184] +processed_samples 4900 unjoint_samples 4900 joint_samples 15 [187164, 1039417] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [682640, 1003115] 
+processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1036673, 308324] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [387453, 1033289] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [258805, 1047319] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1042766, 1022708] +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [613951, 1036574] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559fddcae00] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559fdec8dc0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short 
failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] 
mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [430574, 1047184] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [242265, 1045059] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [567324, 1047319] +[h264 @ 0x5649ee46c840] mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [993636, 1003115] +processed_samples 5000 unjoint_samples 5000 joint_samples 15 [526163, 1039417] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1036673, 661373] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [639696, 1033289] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [980516, 1036574] +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [242265, 1045059] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [430574, 1047184] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1036673, 661373] +processed_samples 5000 unjoint_samples 5000 joint_samples 15 [526163, 1039417] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [567324, 1047319] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [993636, 1003115] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [639696, 1033289] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [980516, 1036574] +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure 
+[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x5649ee9b2500] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: 
unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [141801, 1046799] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [626624, 1045059] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1007557, 381378] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [799334, 1047184] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [898833, 1047319] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [931628, 1033289] +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [925759, 1039417] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1036673, 1012305] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [141801, 1046799] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [799334, 1047184] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1007557, 381378] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [626624, 1045059] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [931628, 1033289] +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [898833, 1047319] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [925759, 1039417] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1036673, 1012305] +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 
0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [628817, 1046799] 
+processed_samples 5200 unjoint_samples 5200 joint_samples 14 [628817, 1046799] +processed_samples 5200 unjoint_samples 5200 joint_samples 16 [156295, 1046736] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1045834, 131862] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [350115, 1046837] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1007557, 620477] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1023091, 282688] +processed_samples 5200 unjoint_samples 5200 joint_samples 16 [156295, 1046736] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1045834, 131862] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1007557, 620477] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [350115, 1046837] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1023091, 282688] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1029032, 1047184] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1029032, 1047184] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [984714, 1045059] +[h264 @ 0x5559ffdc1480] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [984714, 1045059] +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 
@ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a01656bc0] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [928214, 1046799] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1038128, 261470] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [377930, 1047184] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [699267, 1046837] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1023091, 546262] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1045834, 474081] +processed_samples 5300 unjoint_samples 5300 joint_samples 16 [472923, 1046736] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1007557, 972932] +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [928214, 1046799] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1038128, 261470] 
+processed_samples 5300 unjoint_samples 5300 joint_samples 15 [377930, 1047184] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1045834, 474081] +processed_samples 5300 unjoint_samples 5300 joint_samples 16 [472923, 1046736] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [699267, 1046837] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1023091, 546262] +[h264 @ 0x5559fd06cec0] mmco: unref short failure +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1007557, 972932] +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f00553c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 
0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [119916, 1047023] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1046338, 293973] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1046338, 293973] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [119916, 1047023] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1038128, 508328] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1038128, 508328] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [703098, 1047184] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1045834, 755518] +processed_samples 5400 unjoint_samples 5400 joint_samples 16 [726574, 1046736] +processed_samples 5400 unjoint_samples 5400 joint_samples 16 [726574, 1046736] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [703098, 1047184] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1022332, 1046837] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1045834, 755518] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1022332, 1046837] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1023091, 800941] +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1023091, 800941] +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref 
short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649eec6dac0] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559f2cba640] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fddcae00] mmco: unref short failure +[h264 @ 0x5559fddcae00] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1046338, 531746] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1038128, 841101] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 
[349602, 1047023] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1046338, 531746] +processed_samples 5500 unjoint_samples 5500 joint_samples 16 [31641, 1029588] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [349602, 1047023] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1038128, 841101] +processed_samples 5500 unjoint_samples 5500 joint_samples 16 [31641, 1029588] +processed_samples 5500 unjoint_samples 5500 joint_samples 16 [366600, 1046889] +processed_samples 5500 unjoint_samples 5500 joint_samples 16 [366600, 1046889] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [952606, 1047184] +processed_samples 5500 unjoint_samples 5500 joint_samples 15 [952606, 1047184] +processed_samples 5500 unjoint_samples 5500 joint_samples 17 [977175, 102313] +processed_samples 5500 unjoint_samples 5500 joint_samples 17 [977175, 102313] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 5500 unjoint_samples 5500 joint_samples 16 [1045883, 77171] +processed_samples 5500 unjoint_samples 5500 joint_samples 16 [1045883, 77171] +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x5649f24b3080] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure 
+processed_samples 5600 unjoint_samples 5600 joint_samples 15 [665989, 1047023]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [757814, 1046889]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1043995, 106898]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [138025, 1047184]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [313677, 1029588]
+processed_samples 5600 unjoint_samples 5600 joint_samples 17 [977175, 340283]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1045883, 398188]
+processed_samples 5600 unjoint_samples 5600 joint_samples 15 [1046338, 898228]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [438013, 1047184]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1046338, 168892]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1043995, 440108]
+processed_samples 5700 unjoint_samples 5700 joint_samples 17 [977175, 559449]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1048118, 1046889]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1045883, 910327]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [610348, 1029588]
+processed_samples 5700 unjoint_samples 5700 joint_samples 15 [991968, 1047023]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1046338, 489222]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1043995, 761563]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [172071, 1039900]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [208456, 1047023]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [274278, 1047055]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [704681, 1047184]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [977175, 825885]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [882082, 1029588]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1046167, 219972]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [132904, 1028391]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [92033, 1047184]
+processed_samples 5900 unjoint_samples 5900 joint_samples 18 [1004769, 103157]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [1046338, 844199]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [427988, 1039900]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [503140, 1047023]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [541781, 1047055]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [364407, 1047184]
+processed_samples 6000 unjoint_samples 6000 joint_samples 18 [1004769, 426598]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [369661, 1028391]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1046167, 558366]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1046338, 178925]
+processed_samples 6000 unjoint_samples 6000 joint_samples 16 [793450, 1047023]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [804873, 1047055]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [783853, 1039900]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [1047271, 102335]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [131317, 1042149]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1046338, 527140]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [628454, 1028391]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [679324, 1047184]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [1004769, 794060]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1046167, 832626]
+processed_samples 6100 unjoint_samples 6100 joint_samples 16 [1043235, 1047023]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [320922, 1047023]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [883233, 1028391]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [1046167, 138037]
+processed_samples 6200 unjoint_samples 6200 joint_samples 19 [1030944, 169552]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [443597, 1042149]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [1047271, 408245]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [929179, 1047184]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [1046338, 759933]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [53144, 1038729]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [770343, 1042149]
+processed_samples 6300 unjoint_samples 6300 joint_samples 19 [1030944, 426577]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1046650, 282275]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1042680, 77682]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1046167, 459292]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1047271, 714129]
+processed_samples 6300 unjoint_samples 6300 joint_samples 17 [704195, 1047023]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1042680, 296069]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1041317, 1042149]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [400344, 1038729]
+processed_samples 6400 unjoint_samples 6400 joint_samples 19 [1030944, 707620]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1046650, 600778]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1046167, 731020]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1047271, 1022858]
+processed_samples 6400 unjoint_samples 6400 joint_samples 17 [1044682, 1047023]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1042680, 632969]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [845034, 1038729]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [280065, 1047023]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [343590, 1048229]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [252091, 1037762]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1030944, 1019548]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1046650, 946021]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1046167, 998844]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1027384, 113895]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [37886, 1044635]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1046650, 145477]
+processed_samples 6600 unjoint_samples 6600 joint_samples 18 [668607, 1047023]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [321006, 1030705]
+processed_samples 6600 unjoint_samples 6600 joint_samples 20 [1031402, 448483]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [549560, 1037762]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [673000, 1048229]
+[repeated "mmco: unref short failure" decoder warnings omitted]
+processed_samples 6700 unjoint_samples 6700 joint_samples 20 [1046189, 10876]
+processed_samples 6700 unjoint_samples 6700 joint_samples 19 [337863, 1044635] +processed_samples 6700 unjoint_samples 6700 joint_samples 20 [1046189, 10876] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [337863, 1044635] +[h264 @ 0x5649ee505800] mmco: unref short failure +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1027384, 466710] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1027384, 466710] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046650, 542888] +processed_samples 6700 unjoint_samples 6700 joint_samples 20 [1031402, 787895] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046650, 542888] +processed_samples 6700 unjoint_samples 6700 joint_samples 20 [1031402, 787895] +processed_samples 6700 unjoint_samples 6700 joint_samples 18 [970558, 1047023] +processed_samples 6700 unjoint_samples 6700 joint_samples 18 [970558, 1047023] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [568568, 1030705] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [568568, 1030705] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [826032, 1037762] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [826032, 1037762] +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a01c01080] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 
0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [271761, 1047023] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [271761, 1047023] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1027384, 781072] +processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1048170, 25513] +processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1048170, 25513] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1027384, 781072] +processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1046189, 348499] +processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1046189, 348499] +processed_samples 6800 unjoint_samples 6800 joint_samples 21 [109090, 1016327] +processed_samples 6800 unjoint_samples 6800 joint_samples 21 [109090, 1016327] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1046650, 876889] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1046650, 876889] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [647843, 1044635] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [647843, 1044635] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [934686, 1030705] +processed_samples 6800 unjoint_samples 6800 joint_samples 19 [934686, 1030705] +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref 
short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [12617, 1047814] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [12617, 1047814] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1022264, 234298] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1022264, 234298] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1046650, 126573] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [627764, 1047023] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1046650, 126573] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [627764, 1047023] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [1042702, 1044635] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1048170, 520650] +processed_samples 6900 unjoint_samples 6900 joint_samples 21 [410801, 1016327] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1046189, 859306] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1048170, 520650] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [1042702, 1044635] +processed_samples 6900 unjoint_samples 6900 joint_samples 21 [410801, 1016327] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1046189, 859306] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 
0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1022264, 507371] +[h264 @ 0x5559ffdc1480] mmco: unref short failure +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [226430, 1047106] +processed_samples 7000 
unjoint_samples 7000 joint_samples 20 [289600, 1047814] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1046650, 487061] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [93852, 1042906] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1048170, 811652] +processed_samples 7000 unjoint_samples 7000 joint_samples 19 [898532, 1047023] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [733359, 1016327] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1022264, 507371] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [226430, 1047106] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [289600, 1047814] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1046650, 487061] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1048170, 811652] +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +processed_samples 7000 unjoint_samples 7000 joint_samples 19 [898532, 1047023] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [93852, 1042906] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [733359, 1016327] +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a008ecd80] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f70a8fc0] mmco: unref short failure +[h264 @ 0x5649f70a8fc0] mmco: unref short failure +[h264 @ 0x5649f24b3080] mmco: unref short failure +[h264 @ 
0x5649f24b3080] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5559fd7d6f00] mmco: unref short failure +[h264 @ 0x5559fd7d6f00] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [560809, 1047106] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1046650, 741356] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [648703, 1047814] +processed_samples 7100 unjoint_samples 7100 joint_samples 21 [88917, 1047729] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [136766, 1047023] +processed_samples 7100 unjoint_samples 7100 joint_samples 21 [449883, 1042906] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1022264, 887134] +processed_samples 7100 unjoint_samples 7100 joint_samples 21 [1027960, 1025959] +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1046650, 741356] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [560809, 1047106] +processed_samples 7100 unjoint_samples 7100 joint_samples 21 [88917, 1047729] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [648703, 1047814] +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [136766, 1047023] +processed_samples 7100 unjoint_samples 7100 joint_samples 21 [449883, 1042906] +processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1022264, 887134] +processed_samples 7100 unjoint_samples 7100 joint_samples 21 [1027960, 1025959] +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649eef38580] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref 
short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a01dbd0c0] mmco: unref short failure +[h264 @ 0x555a01dbd0c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 
0x5649ee763e80] mmco: unref short failure +processed_samples 7200 unjoint_samples 7200 joint_samples 21 [117692, 1046949] +processed_samples 7200 unjoint_samples 7200 joint_samples 21 [117692, 1046949] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [421087, 1047023] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [862925, 1047106] +processed_samples 7200 unjoint_samples 7200 joint_samples 22 [392276, 1047241] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [421087, 1047023] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [929831, 1047814] +processed_samples 7200 unjoint_samples 7200 joint_samples 21 [420804, 1047729] +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +processed_samples 7200 unjoint_samples 7200 joint_samples 22 [392276, 1047241] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [862925, 1047106] +processed_samples 7200 unjoint_samples 7200 joint_samples 21 [420804, 1047729] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [929831, 1047814] +[h264 @ 0x5649f26f01c0] mmco: unref short failure +processed_samples 7200 unjoint_samples 7200 joint_samples 21 [755432, 1042906] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [1046650, 1013162] +processed_samples 7200 unjoint_samples 7200 joint_samples 21 [755432, 1042906] +processed_samples 7200 unjoint_samples 7200 joint_samples 20 [1046650, 1013162] +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref 
short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef778080] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f2c7b340] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1047065, 216542] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1043644, 160378] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1043644, 160378] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1047065, 216542] +processed_samples 7300 unjoint_samples 7300 joint_samples 22 [1047180, 11000] +processed_samples 7300 unjoint_samples 7300 joint_samples 22 [1047180, 11000] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [343501, 1046704] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [343501, 1046704] +processed_samples 7300 unjoint_samples 7300 joint_samples 20 [753653, 1047023] +processed_samples 7300 unjoint_samples 7300 joint_samples 20 [753653, 1047023] +processed_samples 7300 unjoint_samples 7300 joint_samples 22 [687319, 1047241] +processed_samples 7300 unjoint_samples 7300 joint_samples 22 [687319, 1047241] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [532855, 1046949] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [532855, 1046949] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [704456, 1047729] +processed_samples 7300 unjoint_samples 7300 joint_samples 21 [704456, 1047729] +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 
0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a00b9b540] mmco: unref short failure +[h264 @ 0x555a00b9b540] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5559fceb9280] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1026663, 31663] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1026663, 31663] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [967174, 1047729] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 
[967174, 1047729] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1047065, 497602] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1047065, 497602] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1043644, 528225] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1043644, 528225] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [776663, 1046949] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [776663, 1046949] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [557112, 1046704] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [557112, 1046704] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [1047180, 332698] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [1047180, 332698] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [965373, 1047241] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [965373, 1047241] +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649eeca8b40] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a045943c0] 
mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x5649ef6c2640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649ee146580] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1026663, 273522] +processed_samples 7500 unjoint_samples 7500 joint_samples 23 [229957, 1047241] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1041766, 199325] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1047065, 862282] +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1047180, 651690] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1043644, 821705] +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1037382, 1046949] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [847600, 1046704] +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1026663, 273522] 
+processed_samples 7500 unjoint_samples 7500 joint_samples 23 [229957, 1047241] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1041766, 199325] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1047065, 862282] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1047180, 651690] +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1043644, 821705] +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [847600, 1046704] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1037382, 1046949] +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649ef422040] mmco: unref short failure +[h264 @ 0x5649ef422040] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559ffc94e00] mmco: unref short failure +[h264 @ 0x5559ffc94e00] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559fd8c62c0] mmco: unref short failure +[h264 @ 0x5559fd8c62c0] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f00553c0] mmco: unref short failure +[h264 @ 0x5649f00553c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef422040] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: 
unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [190869, 997682] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1036661, 193993] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +processed_samples 7600 unjoint_samples 7600 joint_samples 23 [492675, 1047241] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1041766, 541558] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1047180, 953300] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [403092, 1046949] +processed_samples 7600 unjoint_samples 7600 joint_samples 21 [1026663, 545583] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1045878, 245108] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [190869, 997682] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1036661, 193993] +processed_samples 7600 unjoint_samples 7600 joint_samples 23 [492675, 1047241] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1041766, 541558] +processed_samples 7600 unjoint_samples 7600 joint_samples 21 [1026663, 545583] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [403092, 1046949] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1047180, 953300] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1045878, 245108] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [451395, 997682]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1036661, 501386]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [702166, 1046949]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1045878, 578684]
+processed_samples 7700 unjoint_samples 7700 joint_samples 23 [228736, 1044116]
+processed_samples 7700 unjoint_samples 7700 joint_samples 21 [1026663, 814539]
+processed_samples 7700 unjoint_samples 7700 joint_samples 23 [791887, 1047241]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1041766, 881148]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [1047475, 90400]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [553619, 1044116]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [809672, 997682]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1036661, 886667]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1045878, 857994]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [987884, 1046949]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [1046959, 1047241]
+processed_samples 7800 unjoint_samples 7800 joint_samples 21 [1040458, 1040562]
+processed_samples 7900 unjoint_samples 7900 joint_samples 22 [1047361, 270413]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [173152, 1040042]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [153090, 1038342]
+processed_samples 7900 unjoint_samples 7900 joint_samples 24 [1046959, 305001]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [180848, 1038095]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1047475, 338689]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [270168, 1046949]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [912201, 1044116]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [542277, 1038095]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [524852, 1046949]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [420972, 1038342]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [451751, 1040042]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1047475, 823289]
+processed_samples 8000 unjoint_samples 8000 joint_samples 24 [1039352, 179325]
+processed_samples 8000 unjoint_samples 8000 joint_samples 22 [1047361, 557644]
+processed_samples 8000 unjoint_samples 8000 joint_samples 24 [1046959, 604582]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [1047475, 92479]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [841637, 1038095]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [826311, 1040042]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [682605, 1038342]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [1046959, 885564]
+processed_samples 8100 unjoint_samples 8100 joint_samples 22 [1047361, 901929]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [836071, 1046949]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [1039352, 435904]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1046784, 112401]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [127718, 1036268]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [38114, 1046907]
+processed_samples 8200 unjoint_samples 8200 joint_samples 25 [88897, 1046533]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1047475, 479969]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [166601, 1046949]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [978353, 1038342]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1039352, 658160]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [444120, 1046907]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [232124, 1045285]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [471537, 1046949]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1046784, 502722]
+processed_samples 8300 unjoint_samples 8300 joint_samples 23 [530384, 1036268]
+processed_samples 8300 unjoint_samples 8300 joint_samples 25 [541212, 1046533]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1047475, 823090]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1039352, 993587]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1046784, 764002]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [671898, 1045285]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [269407, 1043464]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [1047475, 100697]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [876980, 1046533]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [866861, 1046949]
+processed_samples 8400 unjoint_samples 8400 joint_samples 23 [1015102, 1036268]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [862943, 1046907]
+[h264 @ 0x5649f22870c0] Missing reference picture, default is 65530
+[h264 @ 0x5559ffcdde40] Missing reference picture, default is 65530
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1040042, 256314]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1047475, 407578]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [122087, 1046907]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1032949, 217199]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [606117, 1043464]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1046784, 1022117]
+processed_samples 8500 unjoint_samples 8500 joint_samples 26 [100937, 1046533]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [973145, 1045285]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [167071, 1045285]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1047475, 684053]
+processed_samples 8600 unjoint_samples 8600 joint_samples 26 [439469, 1046533]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1047181, 277034]
+processed_samples 8600 unjoint_samples 8600 joint_samples 24 [1040042, 611062]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1032949, 580621]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [395778, 1046907]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [856930, 1043464]
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [1047475, 15799]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [905731, 1046907]
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [1045874, 85168]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1047181, 547941]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [549219, 1045285]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1032949, 903571]
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [756152, 1046533]
+processed_samples 8700 unjoint_samples 8700 joint_samples 24 [1040042, 863555]
+[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1038728, 266508] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1047277, 184480] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [104759, 1046728] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1047475, 320617] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1045874, 453231] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1047181, 906245] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [873743, 1045285] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1038728, 266508] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [104759, 1046728] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1047277, 184480] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1047475, 320617] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [995735, 1046533] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1047181, 906245] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [873743, 1045285] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1045874, 453231] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [995735, 1046533] +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: 
unref short failure +[h264 @ 0x5559fd02cb40] mmco: unref short failure +[h264 @ 0x5559fd02cb40] mmco: unref short failure +[h264 @ 0x5649ee3886c0] mmco: unref short failure +[h264 @ 0x5649ee3886c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd02cb40] mmco: unref short failure +[h264 @ 0x5559fd02cb40] mmco: unref short failure +[h264 @ 0x5559fd02cb40] mmco: unref short failure +[h264 @ 0x5559fd02cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [123257, 1046667] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [110864, 1045285] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [110864, 1045285] +processed_samples 8900 unjoint_samples 8900 joint_samples 27 [1037217, 201421] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [123257, 1046667] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1038728, 538213] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1038728, 538213] +processed_samples 8900 unjoint_samples 8900 joint_samples 25 [1047277, 683180] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [471169, 1046728] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1047475, 676775] +processed_samples 8900 unjoint_samples 8900 joint_samples 25 [1047277, 683180] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [471169, 1046728] +processed_samples 8900 unjoint_samples 8900 joint_samples 27 [1037217, 201421] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1045874, 725442] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1045874, 725442] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1047475, 676775] +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 
0x5559ff40b040] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ee3886c0] mmco: unref short failure +[h264 @ 0x5649ee3886c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 25 [1047277, 950375] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [512455, 1045285] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [464782, 1046667] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1038728, 849792] +processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1037217, 530048] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1045874, 994156] +[h264 @ 
0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [704371, 1046728] +[h264 @ 0x555a04eb3f00] mmco: unref short failure +[h264 @ 0x555a04eb3f00] mmco: unref short failure +[h264 @ 0x555a04eb3f00] mmco: unref short failure +[h264 @ 0x555a04eb3f00] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047475, 970385] +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 25 [1047277, 950375] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [512455, 1045285] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [464782, 1046667] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1038728, 849792] +processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1037217, 530048] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1045874, 994156] +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [704371, 1046728] +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047475, 970385] +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f70a8fc0] mmco: unref short failure +[h264 @ 0x5649f70a8fc0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 
0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [91770, 1047083] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [225650, 1046705] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [91770, 1047083] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [225650, 1046705] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1045874, 296818] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [250395, 1033542] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [795430, 1046667] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [250395, 1033542] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1045874, 296818] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [795430, 1046667] 
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [895416, 1045285] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [895416, 1045285] +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1037217, 874982] +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1037217, 874982] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [937867, 1046728] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [937867, 1046728] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 
0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [73991, 1046667] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1012937, 220560] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [462192, 1033542] +processed_samples 9200 unjoint_samples 9200 joint_samples 28 [154822, 1039938] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [172362, 1046728] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1045874, 648534] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [465065, 1047083] +processed_samples 9200 unjoint_samples 9200 joint_samples 26 [530122, 1046705] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [73991, 1046667] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [172362, 1046728] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1012937, 220560] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [462192, 1033542] +processed_samples 9200 unjoint_samples 9200 joint_samples 28 [154822, 1039938] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1045874, 648534] +processed_samples 9200 unjoint_samples 9200 joint_samples 26 [530122, 1046705] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [465065, 1047083] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref 
short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1012937, 522162] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [882129, 1033542] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [492165, 1046667] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [460248, 1046728] +processed_samples 9300 unjoint_samples 9300 joint_samples 28 [552738, 1039938] +processed_samples 9300 unjoint_samples 9300 joint_samples 26 [908179, 1046705] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [810535, 1047083] +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1045874, 936492] +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1012937, 522162] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [810535, 1047083] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [882129, 1033542] +processed_samples 9300 unjoint_samples 9300 joint_samples 
28 [552738, 1039938] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [492165, 1046667] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [460248, 1046728] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1045874, 936492] +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +processed_samples 9300 unjoint_samples 9300 joint_samples 26 [908179, 1046705] +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure 
+[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [924978, 1046667] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [206860, 1046819] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1012937, 784226] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [158827, 1047083] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [220743, 1046128] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1044569, 139781] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [809095, 1039938] +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [876727, 1046728] +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [924978, 1046667] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [206860, 1046819] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1012937, 784226] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [158827, 1047083] +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [220743, 1046128] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1044569, 139781] +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [809095, 1039938] +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [876727, 1046728] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: 
unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 
0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x555a01603b40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1040380, 142585] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [137705, 1019544] +processed_samples 9500 unjoint_samples 9500 joint_samples 29 [1024028, 124413] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1038227, 242054] +[h264 @ 0x5649f1a022c0] mmco: unref short failure +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [410129, 1047083] +[h264 @ 0x5649f1a022c0] mmco: unref short failure +processed_samples 9500 unjoint_samples 9500 joint_samples 27 [512095, 1046819] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1044569, 396197] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [137705, 1019544] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1040380, 142585] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [629736, 1046128] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1038227, 242054] +processed_samples 9500 unjoint_samples 9500 joint_samples 29 [1024028, 124413] +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure 
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [512095, 1046819] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [410129, 1047083] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1044569, 396197] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [629736, 1046128] +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 
0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1038227, 628474] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [478086, 1019544] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [669891, 1047083] +processed_samples 9600 unjoint_samples 9600 joint_samples 29 [1024028, 448485] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1040380, 432600] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1044569, 719903] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [920557, 1046128] +processed_samples 9600 unjoint_samples 9600 joint_samples 27 [795874, 1046819] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1038227, 628474] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [478086, 1019544] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [669891, 1047083] +processed_samples 9600 unjoint_samples 9600 joint_samples 29 [1024028, 448485] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1040380, 432600] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1044569, 719903] +processed_samples 9600 unjoint_samples 9600 joint_samples 27 [795874, 1046819] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [920557, 1046128] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref 
short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1040380, 753225] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1028990, 185829] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [805458, 1019544] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1038227, 910477] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1013319, 298023] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [994371, 1047083] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1024028, 795797] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1040380, 753225] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1028990, 185829] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1044569, 1033781] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [805458, 1019544] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1013319, 298023] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1038227, 910477] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1044569, 1033781] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1024028, 795797] +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [994371, 1047083] +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 
0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649eea3fb80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649eecbd500] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fd7d6f00] mmco: unref short failure +[h264 @ 0x5559fd7d6f00] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [203755, 1047083] +processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1013319, 591862] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [81067, 1039558] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [290606, 1046311] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [197089, 1044398] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [203755, 1047083] +processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1013319, 591862] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1038227, 125259] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [81067, 1039558] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [197089, 1044398] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [290606, 1046311] +processed_samples 9800 
unjoint_samples 9800 joint_samples 29 [1038227, 125259] +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1028990, 462177] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1028990, 462177] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1024028, 1011840] +processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1024028, 1011840] +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ef544040] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a02722240] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a008ecd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 
0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [584501, 1047083] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [584501, 1047083] +processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1031356, 468096] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [386430, 1039558] +[h264 @ 0x555a02069380] mmco: unref short failure +processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1031356, 468096] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [386430, 1039558] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [708788, 1044398] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1028990, 714164] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1038227, 385286] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [588057, 1046311] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1038227, 385286] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [588057, 1046311] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1028990, 714164] +processed_samples 9900 unjoint_samples 9900 joint_samples 29 [708788, 1044398] +processed_samples 9900 unjoint_samples 9900 joint_samples 28 [1013319, 953069] +processed_samples 9900 unjoint_samples 9900 joint_samples 28 [1013319, 953069] +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a01d82f80] mmco: unref short failure +[h264 @ 0x555a01d82f80] mmco: unref short failure +[h264 @ 0x555a01d82f80] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref 
short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559fdd2fec0] mmco: unref short failure +[h264 @ 0x5559fdd2fec0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5559fdd2fec0] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x555a00929c00] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1039497, 93104] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1048318, 146360] +processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1039497, 93104] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1048318, 146360] 
+processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1031356, 786194] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [674954, 1039558] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [892478, 1046311] +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1031356, 786194] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1038227, 686845] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [674954, 1039558] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [892478, 1046311] +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1038227, 686845] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [964007, 1047083] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [964007, 1047083] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1028990, 1031122] +processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1028990, 1031122] +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649f2c7b340] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 
0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f10448c0] mmco: unref short failure +[h264 @ 0x5649f10448c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 29 [1048318, 428681] +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1039497, 395314] +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1045781, 248108] +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [959819, 399025] +processed_samples 10100 unjoint_samples 10100 joint_samples 29 [911937, 1039558] +processed_samples 10100 unjoint_samples 10100 joint_samples 29 [1038227, 931388] +[h264 @ 0x5649f23cfd80] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1031356, 1034010] +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1048096, 356039] +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 29 [1048318, 428681] +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1039497, 395314] +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1045781, 248108] +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [959819, 399025] +processed_samples 10100 unjoint_samples 10100 joint_samples 29 [911937, 1039558] +processed_samples 10100 unjoint_samples 10100 
joint_samples 30 [1031356, 1034010] +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 29 [1038227, 931388] +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1048096, 356039] +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref 
short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +processed_samples 10200 unjoint_samples 10200 joint_samples 31 [198851, 1047383] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [235979, 1041682] +processed_samples 10200 unjoint_samples 10200 joint_samples 31 [198851, 1047383] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [363659, 1010132] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1039497, 626871] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [235979, 1041682] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [959819, 714686] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [363659, 1010132] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1048096, 696454] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [959819, 714686] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1045781, 514167] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1045781, 514167] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1039497, 626871] +processed_samples 10200 unjoint_samples 10200 joint_samples 29 [1048318, 719459] +processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1048096, 696454] +processed_samples 10200 unjoint_samples 10200 joint_samples 29 [1048318, 719459] +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref 
short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 
0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [80313, 1011814] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [669771, 1010132] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [669771, 1010132] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [80313, 1011814] +processed_samples 10300 unjoint_samples 10300 joint_samples 31 [39135, 1034652] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1045781, 761153] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1045781, 761153] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [460680, 1041682] +processed_samples 10300 unjoint_samples 10300 joint_samples 31 [39135, 1034652] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [460680, 1041682] +processed_samples 10300 unjoint_samples 10300 joint_samples 31 [551478, 1047383] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1039497, 933155] +processed_samples 10300 unjoint_samples 10300 joint_samples 31 [551478, 1047383] +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [966289, 967539] +[h264 @ 0x5649eec6dac0] mmco: unref short failure +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1039497, 933155] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 10300 unjoint_samples 10300 joint_samples 30 [966289, 967539] +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 
0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01603b40] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [435968, 1011814] +[h264 @ 0x5649f2282280] mmco: unref short failure +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [183452, 1041048] +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [172853, 1038532] +processed_samples 10400 unjoint_samples 10400 
joint_samples 30 [977505, 1010132] +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [426840, 1034652] +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [776971, 1041682] +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [1045781, 975766] +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [844115, 1047383] +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [435968, 1011814] +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [172853, 1038532] +[h264 @ 0x555a0105a980] mmco: unref short failure +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [183452, 1041048] +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [977505, 1010132] +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [776971, 1041682] +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [426840, 1034652] +processed_samples 10400 unjoint_samples 10400 joint_samples 30 [1045781, 975766] +processed_samples 10400 unjoint_samples 10400 joint_samples 31 [844115, 1047383] +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref 
short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1033447, 194045] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [467014, 1038532] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 10500 unjoint_samples 10500 joint_samples 30 [782697, 1011814] +processed_samples 10500 unjoint_samples 10500 joint_samples 32 [156504, 1048173] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1033447, 194045] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [218036, 1034239] +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [467014, 1038532] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [461965, 1041048] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [712238, 1034652] +processed_samples 10500 unjoint_samples 10500 joint_samples 30 [782697, 1011814] +[h264 @ 0x555a0030ad00] mmco: unref short failure +processed_samples 10500 unjoint_samples 10500 joint_samples 32 [156504, 1048173] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [218036, 1034239] +processed_samples 10500 unjoint_samples 10500 
joint_samples 31 [461965, 1041048] +processed_samples 10500 unjoint_samples 10500 joint_samples 31 [712238, 1034652] +processed_samples 10500 unjoint_samples 10500 joint_samples 30 [1041885, 1041682] +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +processed_samples 10500 unjoint_samples 10500 joint_samples 30 [1041885, 1041682] +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a02090700] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 
@ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 10600 unjoint_samples 10600 joint_samples 32 [1041514, 11719] +processed_samples 10600 unjoint_samples 10600 joint_samples 32 [1041514, 11719] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +processed_samples 10600 unjoint_samples 10600 joint_samples 32 [383211, 1048173] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [447013, 1034239] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1037875, 96482] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1037875, 96482] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [306471, 1047224] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [306471, 1047224] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1033447, 583281] +processed_samples 10600 unjoint_samples 10600 joint_samples 32 [383211, 1048173] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [447013, 1034239] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [698356, 1041048] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1033447, 583281] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [698356, 1041048] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [774199, 1038532] +processed_samples 10600 unjoint_samples 10600 joint_samples 31 [774199, 1038532] +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 
0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01603b40] mmco: unref short failure +[h264 @ 0x555a01603b40] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649eea3fb80] mmco: unref short failure +[h264 @ 0x5649eea3fb80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short 
failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 10700 unjoint_samples 10700 joint_samples 32 [128410, 1045523] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1037875, 472499] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [624422, 1047224] +processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1041514, 312682] +processed_samples 10700 unjoint_samples 10700 joint_samples 32 [128410, 1045523] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1033447, 841174] +processed_samples 10700 unjoint_samples 10700 joint_samples 32 [704600, 1048173] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1037875, 472499] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1043363, 1044551] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [624422, 1047224] +[h264 @ 0x5649ef013980] mmco: unref short failure +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1033447, 841174] +processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1041514, 312682] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [892183, 1034239] +processed_samples 10700 unjoint_samples 10700 joint_samples 32 [704600, 1048173] +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1043363, 1044551] +[h264 @ 0x555a00a91800] mmco: unref short failure +processed_samples 10700 unjoint_samples 10700 joint_samples 31 [892183, 1034239] +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559fddd7a80] mmco: unref short failure +[h264 @ 0x5559fddd7a80] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref 
short failure
+[h264 @ 0x5559fe7e5a80] mmco: unref short failure
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1035752, 162888]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1045921, 146846]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [449003, 1045523]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [315387, 1044551]
+processed_samples 10800 unjoint_samples 10800 joint_samples 31 [1037875, 835556]
+processed_samples 10800 unjoint_samples 10800 joint_samples 31 [942124, 1047224]
+processed_samples 10800 unjoint_samples 10800 joint_samples 33 [1032065, 29516]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1041514, 655987]
+[h264 @ 0x5559fda58ac0] mmco: unref short failure
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [190252, 1037338]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1035752, 459328]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1046966, 215279]
+processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1032065, 351246]
+processed_samples 10900 unjoint_samples 10900 joint_samples 33 [57951, 1023521]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1045921, 403937]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [575214, 1044551]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [722456, 1045523]
+[h264 @ 0x5649f26f01c0] mmco: unref short failure
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [489455, 1037338]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1046966, 552808]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1045921, 661662]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1035752, 843715]
+processed_samples 11000 unjoint_samples 11000 joint_samples 33 [381536, 1023521]
+processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1032065, 659269]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [893684, 1044551]
+processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1044162, 75800]
+[h264 @ 0x5649ef2c5180] mmco: unref short failure
+processed_samples 11100 unjoint_samples 11100 joint_samples 33 [90226, 1044222]
+processed_samples 11100 unjoint_samples 11100 joint_samples 33 [35137, 1023359]
+processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1045055, 96004]
+processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1044162, 432440]
+processed_samples 11100 unjoint_samples 11100 joint_samples 33 [626501, 1023521]
+processed_samples 11100 unjoint_samples 11100 joint_samples 32 [917931, 1037338]
+processed_samples 11100 unjoint_samples 11100 joint_samples 32 [1046966, 1009349]
+processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1032065, 1004680]
+[h264 @ 0x555a009771c0] mmco: unref short failure
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [370324, 1044222]
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1006570, 223978]
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1044162, 740665]
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [272745, 1046448]
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [937926, 1023521]
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [258289, 1023359]
+processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1045055, 431566]
+processed_samples 11200 unjoint_samples 11200 joint_samples 34 [316008, 1046612]
+[h264 @ 0x555a0139de40] mmco: unref short failure
+processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1006570, 490094]
+processed_samples 11300 unjoint_samples 11300 joint_samples 33 [607037, 1044222]
+processed_samples 11300 unjoint_samples 11300 joint_samples 34 [1034746, 237539]
+processed_samples 11300 unjoint_samples 11300 joint_samples 34 [671219, 1046612]
+processed_samples 11300 unjoint_samples 11300 joint_samples 33 [602074, 1023359]
+processed_samples 11300 unjoint_samples 11300 joint_samples 33 [783103, 1046448]
+processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1045055, 705299]
+processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1044162, 1039867]
+[h264 @ 0x5649eeb19ec0] mmco: unref short failure
+processed_samples 11400 unjoint_samples 11400 joint_samples 33 [847191, 1023359]
+processed_samples 11400 unjoint_samples 11400 joint_samples 34 [950986, 1046612]
+processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1045028, 137747]
+processed_samples 11400 unjoint_samples 11400 joint_samples 34 [93122, 1042734]
+processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1034746, 494401]
+processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1046638, 420718]
+processed_samples 11400 unjoint_samples 11400 joint_samples 33 [912762, 1044222]
+processed_samples 11400 unjoint_samples 11400 joint_samples 33 [1006570, 814426]
+[h264 @ 0x555a01fadf40] mmco: unref short failure
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [143094, 1032155]
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1031307, 273919]
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1010911, 127257]
+processed_samples 11500 unjoint_samples 11500 joint_samples 35 [178384, 1046612]
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1045028, 395499]
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1034746, 745511]
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [400243, 1042734]
+processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1046638, 666786]
+[h264 @ 0x5649f2ad39c0] mmco: unref short failure
+processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1031307, 748776]
+processed_samples 11600 unjoint_samples 11600 joint_samples 34 [464788, 1032155]
+processed_samples 11600 unjoint_samples 11600 joint_samples 35 [84381, 1028799]
+processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1010911, 433321]
+processed_samples 11600 unjoint_samples 11600 joint_samples 35 [452627, 1046612]
+processed_samples 11600 unjoint_samples 11600 joint_samples 34 [717191, 1042734]
+processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1045028, 868530]
+processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1046638, 920461]
+[h264 @ 0x5559fff57580] mmco: unref short failure
+processed_samples 11700 unjoint_samples 11700 joint_samples 34 [762107, 1032155]
+processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1010911, 863024]
+processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1043849, 56536]
+processed_samples 11700 unjoint_samples 11700 joint_samples 35 [284268, 1010837]
+processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1046638, 200875]
+processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1043707, 1043534]
+processed_samples 11700 unjoint_samples 11700 joint_samples 35 [324394, 1028799]
+processed_samples 11700 unjoint_samples 11700 joint_samples 35 [757912, 1046612]
+[h264 @ 0x5649f23cfd80] mmco: unref short failure
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [52957, 1044877]
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1043849, 366158]
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1046638, 508637]
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [581835, 1028799]
+processed_samples 11800 unjoint_samples 11800 joint_samples 36 [1035315, 165408]
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047582, 226100]
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1043425, 136197]
+processed_samples 11800 unjoint_samples 11800 joint_samples 35 [650628, 1010837]
short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5559fe26cd00] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [353851, 1044877] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [353851, 1044877] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1035315, 604488] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1043425, 503852] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1043849, 681942] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1047582, 516773] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1035315, 604488] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1043425, 503852] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1043849, 681942] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1047582, 516773] 
+processed_samples 11900 unjoint_samples 11900 joint_samples 35 [960099, 1010837] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [960099, 1010837] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1046638, 860170] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1046638, 860170] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [859390, 1028799] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [859390, 1028799] +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure 
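The `processed_samples N unjoint_samples N joint_samples M [a, b]` records above are the data loader's periodic progress counters: they advance in steps of 100 (11800, 11900, ...) and each record appears twice, presumably because two writers on the node emit the same line. The log itself never defines the two bracketed numbers; they look like an offset/size pair for the current joint-sample buffer, but that is an inference, not something the training code confirms. A minimal sketch, assuming only that format, for pulling the counters out of a log such as this one and checking that `processed_samples` never goes backwards (field names below are labels chosen for illustration, not names from the training code):

```python
import re

# Hypothetical parser for lines like:
#   processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1047582, 516773]
PATTERN = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) "
    r"joint_samples (\d+) \[(\d+), (\d+)\]"
)

def progress_records(path):
    """Yield de-duplicated progress counters from a log file like log_node30.txt."""
    seen = set()
    with open(path, errors="replace") as fh:
        for line in fh:
            m = PATTERN.search(line)
            if not m:
                continue
            processed, unjoint, joint, offset, total = map(int, m.groups())
            key = (processed, joint, offset, total)
            if key in seen:              # each record is printed twice in this log
                continue
            seen.add(key)
            yield {"processed": processed, "unjoint": unjoint,
                   "joint": joint, "offset": offset, "total": total}

if __name__ == "__main__":
    last = -1
    for rec in progress_records("log_node30.txt"):
        assert rec["processed"] >= last, "processed_samples counter went backwards"
        last = rec["processed"]
    print("last processed_samples:", last)
```

Run against the full log, this gives a quick sanity check on loader throughput without scrolling past the decoder warnings interleaved with the counters.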
+[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559fce66e80] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1003444, 360813] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1043425, 801378] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [236351, 1037505] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1003444, 360813] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [236351, 1037505] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [242872, 1046135] +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1043425, 801378] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [796309, 1044877] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [242872, 1046135] +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1043849, 983242] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1035315, 978031] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [796309, 1044877] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1043849, 983242] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1047582, 865435] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1035315, 978031] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1047582, 865435] +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559fffa9d40] mmco: unref short failure +[h264 @ 0x5559fffa9d40] mmco: unref short 
failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a01136740] mmco: unref short failure +[h264 @ 0x555a01136740] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [145136, 
1046501] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [993547, 135709] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1043849, 351598] +processed_samples 12100 unjoint_samples 12100 joint_samples 37 [196550, 1047132] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1048014, 119398] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [532513, 1037505] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [145136, 1046501] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [993547, 135709] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1048014, 119398] +processed_samples 12100 unjoint_samples 12100 joint_samples 37 [196550, 1047132] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1003444, 680620] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1043849, 351598] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [613456, 1046135] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [532513, 1037505] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1003444, 680620] +[h264 @ 0x5649efe68b80] mmco: unref short failure +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [613456, 1046135] +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 
0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [993547, 473142] +processed_samples 12200 unjoint_samples 12200 joint_samples 37 [504885, 1047132] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1048014, 333278] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1043849, 693272] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [433142, 1046501] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [804067, 1037505] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1003444, 914513] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [993547, 473142] +processed_samples 12200 unjoint_samples 12200 joint_samples 37 [504885, 1047132] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1048014, 333278] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1043849, 693272] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [433142, 1046501] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [804067, 1037505] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1003444, 914513] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [931466, 1046135] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [931466, 1046135] +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 
0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a020decc0] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649ef422040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5559ffc94e00] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short 
failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [251464, 1046430] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1046831, 299400] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1048014, 615495] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1043849, 990824] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1046797, 218884] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [993547, 791688] +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [688565, 1046501] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [983928, 1047132] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [251464, 1046430] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1046831, 299400] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1046797, 218884] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1048014, 615495] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [993547, 791688] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [688565, 1046501] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1043849, 990824] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [983928, 1047132] +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short 
failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x5649ee537a40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [354583, 1031883] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [106069, 1043662] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [565622, 1046430] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1046797, 502036] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [354583, 1031883] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [106069, 1043662] +processed_samples 12400 unjoint_samples 12400 joint_samples 38 [1008915, 359357] +processed_samples 12400 unjoint_samples 12400 joint_samples 38 [1008915, 359357] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [565622, 1046430] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1046831, 588798] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1046831, 588798] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1046797, 502036] +processed_samples 12400 unjoint_samples 12400 joint_samples 36 [1026683, 1026981] +[h264 @ 0x5649f279e4c0] mmco: unref short failure +processed_samples 12400 unjoint_samples 12400 joint_samples 36 [939259, 1046501] +processed_samples 12400 unjoint_samples 12400 joint_samples 36 [1026683, 1026981] +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 12400 unjoint_samples 12400 joint_samples 36 [939259, 1046501] +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref 
short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1046526, 269576] +processed_samples 12500 unjoint_samples 12500 joint_samples 38 [1008915, 805986] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1040955, 213455] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [392874, 1043662] +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure 
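The repeated `[h264 @ 0x...] mmco: unref short failure` lines are emitted by FFmpeg's H.264 decoder when a memory-management control operation (MMCO) in the bitstream asks it to unreference a short-term reference frame it no longer holds; with trimmed or web-sourced clips this is common and usually harmless to the decoded output, but at this volume it dominates the node log. A sketch of one way to quiet it, under the assumption (not confirmed by this log) that the video pipeline decodes clips in-process through PyAV, which wraps the same libavcodec that prints these messages:

```python
# Sketch under an assumption: clips are decoded with PyAV. Raising the libav
# log threshold keeps real errors while dropping per-frame reference-management
# warnings such as "mmco: unref short failure".
import av
import av.logging

av.logging.set_level(av.logging.ERROR)

def decode_frames(path):
    """Decode all video frames from a clip; libav messages below ERROR stay silent."""
    with av.open(path) as container:
        for frame in container.decode(video=0):
            yield frame.to_ndarray(format="rgb24")
```

If decoding instead goes through the ffmpeg command-line tool, passing `-loglevel error` has the same effect; either way the progress counters in this log become much easier to follow.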
+[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1046797, 834976] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [706780, 1031883] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [943416, 1046430] +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1046526, 269576] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [706780, 1031883] +processed_samples 12500 unjoint_samples 12500 joint_samples 38 [1008915, 805986] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1046797, 834976] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1040955, 213455] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [392874, 1043662] +processed_samples 12500 unjoint_samples 12500 joint_samples 38 [24850, 1043320] +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [943416, 1046430] +processed_samples 12500 unjoint_samples 12500 joint_samples 38 [24850, 1043320] +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 
0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5559ff447180] mmco: unref short failure +[h264 @ 0x5559ff447180] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +processed_samples 12600 unjoint_samples 12600 joint_samples 39 [17506, 1046889] +processed_samples 12600 unjoint_samples 12600 joint_samples 39 [17506, 1046889] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1028954, 1031883] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1028954, 1031883] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [141516, 1046478] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [1035194, 366712] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [141516, 1046478] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [1035194, 366712] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1040955, 464830] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1040955, 464830] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1046526, 539818] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1046526, 539818] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [280121, 1043320] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [641125, 1043662] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [280121, 1043320] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [641125, 1043662] +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 
@ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short 
failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a06a83680] mmco: unref short failure +[h264 @ 0x555a06a83680] mmco: unref short failure +[h264 @ 0x555a06a83680] mmco: unref short failure +[h264 @ 0x555a06a83680] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1045818, 284715] +[h264 @ 0x5649efefb000] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 39 [293695, 1046889] +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1040955, 855781] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [494410, 1043320] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1035194, 660677] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [562324, 1046478] +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1046526, 893046] +[h264 @ 0x555a005e9840] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [949756, 1043662] +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1045818, 284715] +processed_samples 12700 unjoint_samples 12700 joint_samples 39 [293695, 1046889] +[h264 @ 0x5649f2850a40] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1040955, 855781] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [494410, 1043320] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1035194, 660677] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [562324, 1046478] +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1046526, 893046] +[h264 @ 0x5649f279e4c0] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [949756, 1043662] +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref 
short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1046526, 139839] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1045818, 554980] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [857097, 1046478] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [219037, 1045808] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1043268, 112077] +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +processed_samples 12800 unjoint_samples 12800 joint_samples 39 [560760, 1046889] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [778423, 1043320] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1035194, 898744] +[h264 @ 0x555a067f2680] 
mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1046526, 139839] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1045818, 554980] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [857097, 1046478] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [219037, 1045808] +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1043268, 112077] +processed_samples 12800 unjoint_samples 12800 joint_samples 39 [560760, 1046889] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [778423, 1043320] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1035194, 898744] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x555a06a83680] mmco: unref short failure +[h264 @ 0x555a06a83680] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 
0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046397, 85695] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1046526, 528940] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [467957, 1045808] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1043268, 511206] +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046275, 62598] +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [838773, 1046889] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1045818, 1002455] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1021311, 1043320] +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046397, 85695] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1046526, 528940] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [467957, 1045808] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1043268, 511206] +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046275, 62598] +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [838773, 1046889] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1021311, 1043320] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1045818, 1002455] +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5559fddcae00] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 
@ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649ee521080] mmco: unref short failure +[h264 @ 0x5649ee521080] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1046275, 369384] +processed_samples 13000 unjoint_samples 13000 joint_samples 40 [1045623, 91370] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [240077, 1047349] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1046397, 334750] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1046275, 369384] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [223695, 1044019] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [956646, 1045808] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1046526, 814297] +processed_samples 13000 unjoint_samples 13000 joint_samples 40 [1045623, 91370] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [240077, 1047349] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1046397, 334750] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [223695, 1044019] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1046526, 814297] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [956646, 1045808] +processed_samples 
13000 unjoint_samples 13000 joint_samples 38 [1043268, 872584] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1043268, 872584] +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short 
failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x555a0192ab00] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [103466, 1024651] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [103466, 1024651] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [571757, 1047349] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [571757, 1047349] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046275, 709019] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046275, 709019] +processed_samples 13100 unjoint_samples 13100 joint_samples 40 [1045623, 394135] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [93796, 1029821] +processed_samples 13100 unjoint_samples 13100 joint_samples 40 [1045623, 394135] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [93796, 1029821] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [609128, 1044019] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046397, 678441] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [609128, 1044019] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046397, 678441] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046417, 212551] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046417, 212551] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short 
failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x5559ffcec940] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5559fd82be00] 
mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fffa6640] mmco: unref short failure +[h264 @ 0x5559fffa6640] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [410289, 1024651] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [410289, 1024651] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [825074, 1047349] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [453053, 1029821] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [909462, 1044019] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [825074, 1047349] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046397, 931380] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [453053, 1029821] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [909462, 1044019] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046397, 931380] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046417, 469797] +processed_samples 13200 unjoint_samples 13200 joint_samples 40 [1045623, 665843] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046417, 469797] +processed_samples 13200 unjoint_samples 13200 joint_samples 40 [1045623, 665843] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046275, 1020331] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046275, 1020331] +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a012d1080] mmco: unref short failure +[h264 @ 0x555a012d1080] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5559fceb9280] mmco: unref short failure +[h264 @ 0x5559fceb9280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 
0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559fd7d6f00] mmco: unref short failure +[h264 @ 0x5559fd7d6f00] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a01603b40] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short 
failure +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [63461, 1047349] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1046397, 230256] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1046275, 280185] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [63461, 1047349] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1046397, 230256] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1046275, 280185] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1028881, 517520] +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [794114, 1024651] +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1046417, 860811] +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [794114, 1024651] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1028881, 517520] +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1046417, 860811] +processed_samples 13300 unjoint_samples 13300 joint_samples 41 [399929, 835418] +processed_samples 13300 unjoint_samples 13300 joint_samples 41 [399929, 835418] +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [744636, 1029821] +[h264 @ 0x5559fdcce100] mmco: unref short failure +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [744636, 1029821] +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short 
failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x555a00ca9740] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a02849d00] Missing reference picture, default is 65530 +[h264 @ 0x555a02849d00] Missing reference picture, default is 65530 +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] Missing reference picture, default is 65530 +[h264 @ 0x5649ef6eac00] Missing reference picture, default is 65530 +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure 
+processed_samples 13400 unjoint_samples 13400 joint_samples 40 [275376, 1047349] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [275376, 1047349] +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [96442, 1045599] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [96442, 1045599] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1046275, 632442] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1046275, 632442] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1028881, 803492] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1044023, 7939] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1044023, 7939] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1028881, 803492] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1046397, 549541] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1046397, 549541] +processed_samples 13400 unjoint_samples 13400 joint_samples 41 [673681, 835418] +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1046469, 2275] +processed_samples 13400 unjoint_samples 13400 joint_samples 41 [673681, 835418] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1046469, 2275] +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 
0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ffc94e00] mmco: unref short failure +[h264 @ 0x5559ffc94e00] mmco: unref short failure +[h264 @ 0x5559ffc94e00] mmco: unref short failure +[h264 @ 0x555a020dad00] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1044023, 290292] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046469, 307868] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [789281, 1047349] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [344892, 1045599] +processed_samples 13500 unjoint_samples 13500 joint_samples 41 [886594, 888903] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046275, 850373] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1031113, 1032192] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046397, 1039659] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1044023, 290292] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046469, 307868] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [789281, 1047349] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [344892, 1045599] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046275, 850373] +processed_samples 13500 unjoint_samples 13500 joint_samples 41 [886594, 888903] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046397, 1039659] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1031113, 1032192] +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559fffa9d40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure 
+[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1046469, 616229] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1046469, 616229] +processed_samples 13600 
unjoint_samples 13600 joint_samples 40 [625978, 1045599] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [625978, 1045599] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [196349, 1038113] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1045341, 106633] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1045341, 106633] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [196349, 1038113] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [199051, 1045463] +processed_samples 13600 unjoint_samples 13600 joint_samples 42 [1046810, 9387] +processed_samples 13600 unjoint_samples 13600 joint_samples 42 [1046810, 9387] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [199051, 1045463] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1046620, 355512] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1044023, 603098] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1046620, 355512] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1044023, 603098] +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a00b9b540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x5649ee521080] mmco: unref short failure +[h264 @ 0x5649ee521080] mmco: unref 
short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1046620, 898755] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1046620, 898755] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1045341, 466511] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [435373, 1038113] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [435373, 1038113] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1045341, 466511] +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1046469, 851558] +processed_samples 13700 unjoint_samples 13700 joint_samples 42 [1046810, 283747] +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [486705, 1045463] +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +processed_samples 13700 unjoint_samples 13700 joint_samples 42 [1046810, 283747] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1046469, 851558] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [486705, 1045463] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1044023, 870758] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [910623, 1045599] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1044023, 870758] +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [910623, 1045599] +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: 
unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [184223, 1045284] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [124485, 1046118] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1046955, 122399] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [149934, 1047710] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1045341, 735665] +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [1046810, 638210] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [702685, 1038113] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [764513, 1045463] +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [184223, 1045284] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [124485, 1046118] +processed_samples 13800 unjoint_samples 13800 joint_samples 
41 [1046955, 122399] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [149934, 1047710] +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [1046810, 638210] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1045341, 735665] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [702685, 1038113] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [764513, 1045463] +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 
0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046955, 473432] +processed_samples 13900 unjoint_samples 13900 joint_samples 42 [58896, 1047059] +processed_samples 13900 unjoint_samples 13900 joint_samples 42 [58896, 1047059] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046955, 473432] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [332452, 1046118] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [332452, 1046118] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [409555, 1047710] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [409555, 1047710] +processed_samples 13900 unjoint_samples 13900 joint_samples 42 [619416, 1045284] +processed_samples 13900 unjoint_samples 13900 joint_samples 42 [619416, 1045284] +processed_samples 13900 unjoint_samples 13900 joint_samples 42 [1046810, 900611] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [963526, 1038113] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [963526, 1038113] +processed_samples 13900 unjoint_samples 13900 joint_samples 42 [1046810, 900611] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1042745, 1045463] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1042745, 1045463] +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 
@ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5649ef6ccec0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x5649ee807880] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short 
failure
+[h264 @ 0x5559fda59440] mmco: unref short failure
+[h264 @ 0x5649ee8a2900] mmco: unref short failure
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [1005724, 285725]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [423876, 1047059]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [779490, 1047710]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1046955, 890045]
+processed_samples 14000 unjoint_samples 14000 joint_samples 43 [153836, 1025384]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [248842, 1045463]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [592321, 1046118]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [924084, 1045284]
+[h264 @ 0x5649f704ae40] mmco: unref short failure
+[h264 @ 0x5559ff10e700] mmco: unref short failure
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1046955, 141152]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [36720, 1047710]
+processed_samples 14100 unjoint_samples 14100 joint_samples 43 [183984, 1045284]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1005724, 662471]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [882020, 1047059]
+processed_samples 14100 unjoint_samples 14100 joint_samples 43 [520287, 1025384]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [552508, 1045463]
+processed_samples 14100 unjoint_samples 14100 joint_samples 41 [920704, 1046118]
+[h264 @ 0x555a0076ff80] mmco: unref short failure
+[h264 @ 0x5649f2ad39c0] mmco: unref short failure
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [159121, 1046118]
+processed_samples 14200 unjoint_samples 14200 joint_samples 43 [131062, 1047059]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [434399, 1047710]
+processed_samples 14200 unjoint_samples 14200 joint_samples 43 [453173, 1045284]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1046955, 515088]
+processed_samples 14200 unjoint_samples 14200 joint_samples 43 [919180, 1025384]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1005724, 943910]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [892352, 1045463]
+[h264 @ 0x555a00929c00] mmco: unref short failure
+[h264 @ 0x5649ee4f7040] mmco: unref short failure
+[h264 @ 0x555a0139de40] mmco: unref short failure
+[h264 @ 0x5649f22b6800] mmco: unref short failure
+processed_samples 14300 unjoint_samples 14300 joint_samples 44 [202728, 1042201]
+processed_samples 14300 unjoint_samples 14300 joint_samples 43 [76695, 1047559]
+processed_samples 14300 unjoint_samples 14300 joint_samples 43 [1043636, 197466]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [527394, 1046118]
+processed_samples 14300 unjoint_samples 14300 joint_samples 43 [389192, 1047059]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1046955, 813807]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [839538, 1047710]
+processed_samples 14300 unjoint_samples 14300 joint_samples 43 [767251, 1045284]
+[h264 @ 0x555a01c69580] mmco: unref short failure
+[h264 @ 0x555a05fa7000] mmco: unref short failure
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [110012, 1030169]
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [1047213, 59291]
+processed_samples 14400 unjoint_samples 14400 joint_samples 44 [84987, 1046776]
+processed_samples 14400 unjoint_samples 14400 joint_samples 44 [578833, 1042201]
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [1043636, 451790]
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [349469, 1047559]
+processed_samples 14400 unjoint_samples 14400 joint_samples 42 [930189, 1046118]
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [690881, 1047059]
+[h264 @ 0x5649ee537a40] mmco: unref short failure
+[h264 @ 0x555a067f2680] mmco: unref short failure
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1047213, 493755]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [156327, 1048242]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [351813, 1030169]
+processed_samples 14500 unjoint_samples 14500 joint_samples 44 [428707, 1046776]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1022921, 1047059]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1043636, 755528]
+processed_samples 14500 unjoint_samples 14500 joint_samples 44 [926515, 1042201]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [649191, 1047559]
+[h264 @ 0x5649ee905540] mmco: unref short failure
+[h264 @ 0x5559ff947380] mmco: unref short failure
+processed_samples 14600 unjoint_samples 14600 joint_samples 44 [1046605, 187775]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [468598, 1048242]
+processed_samples 14600 unjoint_samples 14600 joint_samples 45 [122465, 1046598]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1047213, 762387]
+processed_samples 14600 unjoint_samples 14600 joint_samples 44 [716648, 1046776]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [957133, 1047559]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [668285, 1030169]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1043636, 1024674]
+[h264 @ 0x5559fd7d61c0] mmco: unref short failure
+[h264 @ 0x555a011b9980] mmco: unref short failure
mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [11089, 1046570] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [11089, 1046570] +processed_samples 14700 unjoint_samples 14700 joint_samples 45 [1039184, 79494] +processed_samples 14700 unjoint_samples 14700 joint_samples 45 [1039184, 79494] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [335570, 1047559] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [335570, 1047559] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1046605, 412179] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1046605, 412179] +processed_samples 14700 unjoint_samples 14700 joint_samples 45 [477050, 1046598] +processed_samples 14700 unjoint_samples 14700 joint_samples 45 [477050, 1046598] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1047404, 344796] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1047404, 344796] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [788256, 1048242] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [788256, 1048242] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [992955, 1030169] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [992955, 1030169] +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a045943c0] 
mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a01468bc0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 
0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +processed_samples 14800 unjoint_samples 14800 joint_samples 45 [723060, 1046598] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [92635, 1048242] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [239637, 1047448] +processed_samples 14800 unjoint_samples 14800 joint_samples 45 [1039184, 429369] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [633469, 1047559] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [329384, 1046570] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1047404, 622276] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1046605, 710576] +processed_samples 14800 unjoint_samples 14800 joint_samples 45 [723060, 1046598] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [92635, 1048242] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [239637, 1047448] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [329384, 1046570] +processed_samples 14800 unjoint_samples 14800 joint_samples 45 [1039184, 429369] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1046605, 710576] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1047404, 622276] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [633469, 1047559] +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 
0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short 
failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [533642, 1047448] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [533642, 1047448] +processed_samples 14900 unjoint_samples 14900 joint_samples 46 [46039, 1046659] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [980286, 1047559] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [980286, 1047559] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [339028, 1048242] +processed_samples 14900 unjoint_samples 14900 joint_samples 46 [46039, 1046659] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [339028, 1048242] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [610685, 1046570] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [610685, 1046570] +processed_samples 14900 unjoint_samples 14900 joint_samples 45 [1039184, 814639] +processed_samples 14900 unjoint_samples 14900 joint_samples 45 [1039184, 814639] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1047404, 983297] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1047404, 983297] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1046605, 990142] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1046605, 990142] +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short 
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15000 unjoint_samples 15000 joint_samples 46 [299825, 1046659]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [696351, 1048242]
+processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1046605, 194912]
+processed_samples 15000 unjoint_samples 15000 joint_samples 46 [67811, 1044908]
+processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1047404, 204719]
+processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1026421, 275715]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [936282, 1046570]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [939138, 1047448]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15100 unjoint_samples 15100 joint_samples 44 [984374, 1048242]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1042748, 177357]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [161612, 1047448]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1047404, 507909]
+processed_samples 15100 unjoint_samples 15100 joint_samples 46 [429458, 1044908]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046605, 525556]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1026421, 554426]
+processed_samples 15100 unjoint_samples 15100 joint_samples 46 [643541, 1046659]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [407349, 1047448]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1048022, 204247]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1026421, 901311]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1042748, 568415]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046605, 868111]
+processed_samples 15200 unjoint_samples 15200 joint_samples 46 [646524, 1044908]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1047404, 759171]
+processed_samples 15200 unjoint_samples 15200 joint_samples 46 [911276, 1046659]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15300 unjoint_samples 15300 joint_samples 46 [1046605, 90877]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [642808, 1047448]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1048022, 581832]
+processed_samples 15300 unjoint_samples 15300 joint_samples 47 [103812, 1046659]
+processed_samples 15300 unjoint_samples 15300 joint_samples 46 [96183, 1043822]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1042748, 830319]
+processed_samples 15300 unjoint_samples 15300 joint_samples 46 [940276, 1044908]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1047404, 1034632]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15400 unjoint_samples 15400 joint_samples 46 [379375, 1043822]
+processed_samples 15400 unjoint_samples 15400 joint_samples 45 [1048022, 1045809]
+processed_samples 15400 unjoint_samples 15400 joint_samples 46 [1047411, 28274]
+processed_samples 15400 unjoint_samples 15400 joint_samples 47 [1047529, 151070]
+processed_samples 15400 unjoint_samples 15400 joint_samples 46 [263010, 1047821]
+processed_samples 15400 unjoint_samples 15400 joint_samples 46 [1046605, 386686]
+processed_samples 15400 unjoint_samples 15400 joint_samples 47 [467656, 1046659]
+processed_samples 15400 unjoint_samples 15400 joint_samples 45 [994220, 1047448]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1047411, 345187]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [404461, 1045809]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1046514, 227331]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [564845, 1047821]
+processed_samples 15500 unjoint_samples 15500 joint_samples 47 [1047529, 476934]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1046605, 706854]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [653970, 1043822]
+processed_samples 15500 unjoint_samples 15500 joint_samples 47 [784848, 1046659]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
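The "mmco: unref short failure" messages above come from FFmpeg's h264 decoder; they typically indicate reference-frame bookkeeping hiccups in the source video streams rather than a training error. If the video decoding in the data loader goes through PyAV, which is an assumption since the decode path is not visible in this log, a sketch like the following would keep this warning-level chatter out of the training log.

# Sketch under the assumption that video decoding uses PyAV (not confirmed by this log).
import av
import av.logging

# Only surface real decode errors; drop warning-level messages such as
# "mmco: unref short failure" emitted by the h264 decoder.
av.logging.set_level(av.logging.ERROR)

# Equivalent idea when shelling out to the ffmpeg CLI instead: pass "-loglevel error".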
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1046514, 489869]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [728191, 1045809]
+processed_samples 15600 unjoint_samples 15600 joint_samples 47 [15386, 1048015]
+processed_samples 15600 unjoint_samples 15600 joint_samples 48 [18458, 1046659]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [818786, 1047821]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1047411, 713963]
+processed_samples 15600 unjoint_samples 15600 joint_samples 47 [1047529, 849609]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1046605, 1016691]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1046514, 751262]
+processed_samples 15700 unjoint_samples 15700 joint_samples 48 [1047529, 257143]
+processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1041522, 131458]
+processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1046605, 275662]
+processed_samples 15700 unjoint_samples 15700 joint_samples 47 [284312, 1048015]
+processed_samples 15700 unjoint_samples 15700 joint_samples 48 [247417, 1046659]
+processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1047411, 1004159]
+processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1013183, 1045809]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15800 unjoint_samples 15800 joint_samples 48 [1047529, 653291]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1042287, 326785]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [648506, 1048015]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [254576, 1046923]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1041522, 525114]
+processed_samples 15800 unjoint_samples 15800 joint_samples 48 [619260, 1046659]
+processed_samples 15800 unjoint_samples 15800 joint_samples 46 [1046514, 1026555]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1046605, 680037]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 15900 unjoint_samples 15900 joint_samples 48 [1046605, 83978]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [244074, 1048371]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [996543, 1048015]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1042287, 724518]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [545936, 1046923]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1041522, 860416]
+processed_samples 15900 unjoint_samples 15900 joint_samples 48 [897266, 1046659]
+processed_samples 15900 unjoint_samples 15900 joint_samples 48 [1047529, 936904]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
+processed_samples 16000 unjoint_samples 16000 joint_samples 49 [222990, 1046734]
+processed_samples 16000 unjoint_samples 16000 joint_samples 48 [1046605, 351559]
+processed_samples 16000 unjoint_samples 16000 joint_samples 48 [52873, 1046859]
+processed_samples 16000 unjoint_samples 16000 joint_samples 47 [491351, 1048371]
+processed_samples 16000 unjoint_samples 16000 joint_samples 48 [8070, 1046923]
+processed_samples 16000 unjoint_samples 16000 joint_samples 49 [201258, 1046659]
+processed_samples 16000 unjoint_samples 16000 joint_samples 48 [207561, 1048015]
+processed_samples 16000 unjoint_samples 16000 joint_samples 47 [1042287, 1031405]
+[h264 @ 0x…] mmco: unref short failure    (recurring across many decoder contexts)
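Each decoder warning above is tagged with the address of the FFmpeg decoder context that emitted it. A small, assumed post-processing helper like the one below tallies how often the warning fires per context, which helps tell a few problematic source videos apart from noise spread across the whole dataset; the regex and file name are placeholders inferred from the lines in this log.

import re
from collections import Counter

# Matches e.g. "[h264 @ 0x5649f2472700] mmco: unref short failure"
WARN = re.compile(r"\[h264 @ (0x[0-9a-f]+)\] mmco: unref short failure")

def warning_histogram(path):
    """Return a Counter mapping decoder-context address -> warning count."""
    counts = Counter()
    with open(path, errors="replace") as fh:
        for line in fh:
            counts.update(WARN.findall(line))
    return counts

# Example usage (path is a placeholder):
# print(warning_histogram("log_node30.txt").most_common(10))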
+[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [395433, 1040248] +processed_samples 16100 unjoint_samples 16100 joint_samples 49 [540322, 1046734] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [369698, 1046859] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [343406, 1046923] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [504219, 1048015] +processed_samples 16100 unjoint_samples 16100 joint_samples 49 [486512, 1046659] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1046605, 639047] +processed_samples 16100 unjoint_samples 16100 joint_samples 47 [809565, 1048371] +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [395433, 1040248] +processed_samples 16100 unjoint_samples 16100 joint_samples 49 [540322, 1046734] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [369698, 1046859] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [504219, 1048015] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [343406, 1046923] +processed_samples 16100 unjoint_samples 16100 joint_samples 49 [486512, 1046659] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1046605, 639047] +processed_samples 16100 unjoint_samples 16100 joint_samples 47 [809565, 1048371] +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5559ffce3ac0] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short 
failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] 
mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1046838, 36849] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1046838, 36849] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [696758, 1046923] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [696758, 1046923] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [715740, 1046859] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [715740, 1046859] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [864313, 1048015] +processed_samples 16200 unjoint_samples 16200 joint_samples 49 [753503, 1046734] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [864313, 1048015] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [764185, 1040248] +processed_samples 16200 unjoint_samples 16200 joint_samples 49 [765154, 1046659] +processed_samples 16200 unjoint_samples 16200 joint_samples 49 [753503, 1046734] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [764185, 1040248] +processed_samples 16200 unjoint_samples 16200 joint_samples 49 [765154, 1046659] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1046605, 893554] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1046605, 893554] +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] 
mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee7de5c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1046605, 224095] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [1038849, 107087] +processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1046605, 224095] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [960290, 1046923] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [960290, 1046923] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1046838, 447430] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [1038849, 107087] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1046838, 447430] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [19272, 1047822] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1021208, 1046859] +processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1009126, 149600] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1021208, 1046859] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [19272, 1047822] +processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1009126, 149600] +processed_samples 16300 
unjoint_samples 16300 joint_samples 48 [1029841, 1040248] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1029841, 1040248] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f8099e00] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x5649f06c89c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5559ff40b040] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046605, 499525] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [313617, 1046859] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046237, 241240] +processed_samples 16400 unjoint_samples 16400 joint_samples 50 [1038849, 521646] +processed_samples 16400 
unjoint_samples 16400 joint_samples 49 [1046562, 221364] +[h264 @ 0x5559ff8cd940] mmco: unref short failure +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1009126, 434271] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046605, 499525] +processed_samples 16400 unjoint_samples 16400 joint_samples 50 [343985, 1047822] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1009126, 434271] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [313617, 1046859] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046237, 241240] +processed_samples 16400 unjoint_samples 16400 joint_samples 48 [1046838, 743582] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046562, 221364] +processed_samples 16400 unjoint_samples 16400 joint_samples 50 [1038849, 521646] +processed_samples 16400 unjoint_samples 16400 joint_samples 50 [343985, 1047822] +[h264 @ 0x5649ee4f7040] mmco: unref short failure +processed_samples 16400 unjoint_samples 16400 joint_samples 48 [1046838, 743582] +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f700eb80] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short 
failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1047034, 28245] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1047034, 28245] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046237, 457346] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046237, 457346] +processed_samples 16500 unjoint_samples 16500 joint_samples 50 [748263, 1047822] +processed_samples 16500 unjoint_samples 16500 joint_samples 50 [748263, 1047822] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046605, 892360] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046605, 892360] +processed_samples 16500 unjoint_samples 16500 joint_samples 50 [1038849, 827441] +processed_samples 16500 unjoint_samples 16500 joint_samples 50 [1038849, 827441] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046562, 465836] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046562, 465836] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1009126, 707858] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1009126, 707858] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [639624, 1046859] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [639624, 1046859] +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short 
failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649f26f01c0] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f25d8a00] 
mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5559ffafec80] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +processed_samples 16600 unjoint_samples 16600 joint_samples 50 [146901, 1047612] +processed_samples 16600 unjoint_samples 16600 joint_samples 50 [146901, 1047612] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1047034, 271818] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1047034, 271818] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [970568, 335378] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [1043499, 26749] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [1043499, 26749] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [970568, 335378] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1009126, 997896] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1009126, 997896] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1046237, 776619] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1046237, 776619] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [871191, 1046859] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [871191, 1046859] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1046562, 709072] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1046562, 709072] +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a01fadf40] 
mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x5649ee5e1f80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [512500, 1047612] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1047034, 566436] +processed_samples 16700 unjoint_samples 16700 joint_samples 51 [1043499, 491958] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [926586, 345655] +[h264 @ 0x555a012d1080] mmco: unref short failure +[h264 @ 0x555a012d1080] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [512500, 1047612] +[h264 @ 0x5649ef55cb40] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [369808, 1046503] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [926586, 345655] +processed_samples 16700 unjoint_samples 16700 joint_samples 51 [1043499, 491958] +processed_samples 16700 unjoint_samples 16700 joint_samples 51 [970568, 643165] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1047034, 566436] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1046562, 995829] +[h264 @ 0x5559ff97fb80] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [369808, 1046503] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 51 [970568, 643165] +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1046237, 1038259] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref 
short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1046562, 995829] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1046237, 1038259] +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f738ab00] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 
0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [808536, 1047612] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [705796, 1046503] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [926586, 644707] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046237, 284440] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046562, 244148] +processed_samples 16800 unjoint_samples 16800 joint_samples 51 [970568, 896207] +processed_samples 16800 unjoint_samples 16800 joint_samples 51 [1043499, 732131] +processed_samples 16800 unjoint_samples 16800 joint_samples 49 [1047034, 859904] +[h264 @ 0x555a0192ab00] mmco: unref short failure +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [926586, 644707] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [808536, 1047612] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [705796, 1046503] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046237, 284440] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046562, 244148] +processed_samples 16800 unjoint_samples 16800 joint_samples 51 [970568, 896207] +processed_samples 16800 unjoint_samples 16800 joint_samples 51 [1043499, 732131] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 16800 unjoint_samples 16800 joint_samples 49 [1047034, 859904] +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x555a00eae800] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 
0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x555a009771c0] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a01d82f80] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559fd7ed200] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5649ef6c1a80] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 51 [1046364, 46299] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046237, 563939] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [95173, 1043931] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [1046496, 89101] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [153945, 1030463] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046237, 563939] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [95173, 1043931] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046562, 593415] +processed_samples 16900 unjoint_samples 16900 joint_samples 51 [1046364, 46299] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [1046496, 89101] +[h264 @ 0x5649f2c7b340] mmco: unref short failure +[h264 @ 0x5649f2c7b340] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046562, 593415] +[h264 @ 0x5649f2c7b340] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [153945, 1030463] +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: 
unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [983868, 1046503] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [987809, 994613] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [983868, 1046503] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [987809, 994613] +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef45a780] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short 
failure +[h264 @ 0x555a008b6b40] mmco: unref short failure +[h264 @ 0x555a008b6b40] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [1046496, 356297] +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1046539, 136773] +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1046364, 514149] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [451472, 1030463] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [203326, 1046503] +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [343107, 1043931] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [1046237, 898074] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [1046562, 948074] +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1046539, 136773] +[h264 @ 0x5649eea7ce40] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [1046496, 356297] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1046364, 514149] +[h264 @ 0x5649ee46c840] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [203326, 1046503] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [451472, 1030463] +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [343107, 1043931] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [1046562, 948074] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [1046237, 898074] +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559fd7df8c0] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efbe5e00] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref 
short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x5649f2c24680] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 
+processed_samples 17100 unjoint_samples 17100 joint_samples 52 [650495, 1043931]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [124216, 1043853]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [182361, 1046533]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [667616, 1046503]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1046539, 416250]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1046364, 772920]
+processed_samples 17100 unjoint_samples 17100 joint_samples 52 [1046496, 739044]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [124216, 1043853]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [182361, 1046533]
+processed_samples 17100 unjoint_samples 17100 joint_samples 52 [650495, 1043931]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [667616, 1046503]
+processed_samples 17100 unjoint_samples 17100 joint_samples 50 [691117, 1030463]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1046539, 416250]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1046364, 772920]
+processed_samples 17100 unjoint_samples 17100 joint_samples 52 [1046496, 739044]
+processed_samples 17100 unjoint_samples 17100 joint_samples 50 [691117, 1030463]
+processed_samples 17200 unjoint_samples 17200 joint_samples 53 [1046496, 42043]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [1046364, 81991]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [512052, 1043853]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [498091, 1046533]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [1046364, 81991]
+processed_samples 17200 unjoint_samples 17200 joint_samples 53 [1046496, 42043]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1046539, 847044]
+processed_samples 17200 unjoint_samples 17200 joint_samples 50 [1022791, 1030463]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [1022425, 1043931]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [512052, 1043853]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [498091, 1046533]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1046539, 847044]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [1022425, 1043931]
+processed_samples 17200 unjoint_samples 17200 joint_samples 50 [1022791, 1030463]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [945808, 1046503]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [945808, 1046503]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [254398, 1024488]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [1046874, 224911]
+processed_samples 17300 unjoint_samples 17300 joint_samples 53 [1046496, 325096]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1046254, 251953]
+processed_samples 17300 unjoint_samples 17300 joint_samples 53 [1041886, 329927]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1046364, 424804]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [254398, 1024488]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [1046874, 224911]
+processed_samples 17300 unjoint_samples 17300 joint_samples 53 [1046496, 325096]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1046364, 424804]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1046254, 251953]
+processed_samples 17300 unjoint_samples 17300 joint_samples 53 [1041886, 329927]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [957816, 1046533]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [944115, 1043853]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [957816, 1046533]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [944115, 1043853]
+processed_samples 17400 unjoint_samples 17400 joint_samples 51 [1046874, 616398]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1047235, 363744]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [540046, 1024488]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1045065, 210144]
+processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1041886, 758627]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1046364, 851077]
+processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1046496, 614136]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1046254, 758247]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1047235, 363744]
+processed_samples 17400 unjoint_samples 17400 joint_samples 51 [1046874, 616398]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [540046, 1024488]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1045065, 210144]
+processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1041886, 758627]
+processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1046496, 614136]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1046364, 851077]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1046254, 758247]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1047235, 700229]
+processed_samples 17500 unjoint_samples 17500 joint_samples 54 [60450, 1019283]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [1046364, 138426]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1047235, 700229]
+processed_samples 17500 unjoint_samples 17500 joint_samples 54 [60450, 1019283]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1045065, 565351]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [19392, 1046811]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [1046364, 138426]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [784006, 1024488]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [19392, 1046811]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [1046496, 962646]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1045065, 565351]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [784006, 1024488]
+processed_samples 17500 unjoint_samples 17500 joint_samples 51 [1046874, 929482]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [1046496, 962646]
+processed_samples 17500 unjoint_samples 17500 joint_samples 51 [1046874, 929482]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1046874, 188377]
+processed_samples 17600 unjoint_samples 17600 joint_samples 54 [1046910, 200641]
+processed_samples 17600 unjoint_samples 17600 joint_samples 53 [308994, 1046811]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1045065, 800487]
+processed_samples 17600 unjoint_samples 17600 joint_samples 53 [1046364, 427102]
+processed_samples 17600 unjoint_samples 17600 joint_samples 54 [437610, 1019283]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1002135, 1024488]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1046874, 188377]
+processed_samples 17600 unjoint_samples 17600 joint_samples 54 [1046910, 200641]
+processed_samples 17600 unjoint_samples 17600 joint_samples 53 [308994, 1046811]
+processed_samples 17600 unjoint_samples 17600 joint_samples 54 [437610, 1019283]
+processed_samples 17600 unjoint_samples 17600 joint_samples 53 [1046364, 427102]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1047235, 983555]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1045065, 800487]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1002135, 1024488]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1047235, 983555]
+processed_samples 17700 unjoint_samples 17700 joint_samples 52 [1046874, 504009]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [349109, 921903]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [196340, 1047550]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1046364, 735773]
+processed_samples 17700 unjoint_samples 17700 joint_samples 54 [747400, 1019283]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [700155, 1046811]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [220594, 1037240]
+processed_samples 17700 unjoint_samples 17700 joint_samples 54 [1046910, 463930]
+processed_samples 17700 unjoint_samples 17700 joint_samples 52 [1046874, 504009]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [700155, 1046811]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [196340, 1047550]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1046364, 735773]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [349109, 921903]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [220594, 1037240]
+processed_samples 17700 unjoint_samples 17700 joint_samples 54 [1046910, 463930]
+processed_samples 17700 unjoint_samples 17700 joint_samples 54 [747400, 1019283]
+processed_samples 17800 unjoint_samples 17800 joint_samples 52 [1046874, 851580]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [440619, 1047550]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [478442, 1037240]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [1024371, 1046811]
+processed_samples 17800 unjoint_samples 17800 joint_samples 52 [1046874, 851580]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [609333, 921903]
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [1046910, 980262]
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [38127, 1039929]
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [1014725, 1019283]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [478442, 1037240]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [440619, 1047550]
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [1014725, 1019283]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [1024371, 1046811]
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [1046910, 980262]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [609333, 921903]
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [38127, 1039929]
+processed_samples 17900 unjoint_samples 17900 joint_samples 55 [244587, 1041944]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [117114, 1043785]
+processed_samples 17900 unjoint_samples 17900 joint_samples 54 [358824, 1039929]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [765583, 1047550]
+processed_samples 17900 unjoint_samples 17900 joint_samples 55 [1036890, 387692]
+processed_samples 17900 unjoint_samples 17900 joint_samples 54 [220760, 1046811]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [784032, 1037240]
+processed_samples 17900 unjoint_samples 17900 joint_samples 55 [244587, 1041944]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [117114, 1043785]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [997954, 937444]
+processed_samples 17900 unjoint_samples 17900 joint_samples 54 [358824, 1039929]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [765583, 1047550]
+processed_samples 17900 unjoint_samples 17900 joint_samples 55 [1036890, 387692]
+processed_samples 17900 unjoint_samples 17900 joint_samples 54 [220760, 1046811]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [784032, 1037240]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [997954, 937444]
+processed_samples 18000 unjoint_samples 18000 joint_samples 53 [440767, 1043785]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1037696, 157113]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [52930, 1047168]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [672333, 1039929]
+processed_samples 18000 unjoint_samples 18000 joint_samples 55 [590070, 1041944]
+processed_samples 18000 unjoint_samples 18000 joint_samples 55 [1036890, 750150]
+processed_samples 18000 unjoint_samples 18000 joint_samples 53 [440767, 1043785]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [525706, 1046811]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [672333, 1039929]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1037696, 157113]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [52930, 1047168]
+processed_samples 18000 unjoint_samples 18000 joint_samples 55 [590070, 1041944]
+processed_samples 18000 unjoint_samples 18000 joint_samples 55 [1036890, 750150]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [525706, 1046811]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1047402, 4518]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1047402, 4518]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [337102, 1047168]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1047402, 331027]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1037696, 404133]
+processed_samples 18100 unjoint_samples 18100 joint_samples 55 [896076, 1041944]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [792931, 1046811]
+processed_samples 18100 unjoint_samples 18100 joint_samples 53 [696959, 1043785]
+processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1045631, 3389]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [337102, 1047168]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1047402, 331027]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1037696, 404133]
+processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1040475, 1040736]
+processed_samples 18100 unjoint_samples 18100 joint_samples 55 [896076, 1041944]
+processed_samples 18100 unjoint_samples 18100 joint_samples 53 [696959, 1043785]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [792931, 1046811]
+processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1045631, 3389]
+processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1040475, 1040736]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [649075, 1047168]
+processed_samples 18200 unjoint_samples 18200 joint_samples 56 [261580, 1047141]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1037696, 787783]
+processed_samples 18200 unjoint_samples 18200 joint_samples 56 [138085, 1046744]
+processed_samples 18200 unjoint_samples 18200 joint_samples 55 [1045631, 237211]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1047402, 646699]
+processed_samples 18200 unjoint_samples 18200 joint_samples 55 [1023804, 113039]
+processed_samples 18200 unjoint_samples 18200 joint_samples 53 [983067, 1043785]
+processed_samples 18300 unjoint_samples 18300 joint_samples 55 [103075, 1019416]
+processed_samples 18300 unjoint_samples 18300 joint_samples 54 [1047402, 907297]
+processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1023804, 370838]
+processed_samples 18300 unjoint_samples 18300 joint_samples 54 [290617, 1046042]
+processed_samples 18300 unjoint_samples 18300 joint_samples 56 [444978, 1046744]
+processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1045631, 490566]
+processed_samples 18300 unjoint_samples 18300 joint_samples 56 [635398, 1047141]
+processed_samples 18300 unjoint_samples 18300 joint_samples 54 [941634, 1047168]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1047402, 146642]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [362857, 1019416]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1023804, 651751]
+processed_samples 18400 unjoint_samples 18400 joint_samples 54 [574148, 1046042]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [995693, 286355]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1045631, 844468]
+processed_samples 18400 unjoint_samples 18400 joint_samples 56 [692029, 1046744]
+processed_samples 18400 unjoint_samples 18400 joint_samples 56 [919411, 1047141]
+processed_samples 18500 unjoint_samples 18500 joint_samples 56 [31916, 1044722]
+processed_samples 18500 unjoint_samples 18500 joint_samples 54 [900010, 1046042]
+processed_samples 18500 unjoint_samples 18500 joint_samples 57 [134414, 1047141]
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [640036, 1019416]
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [1023804, 1012752]
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [995693, 510802]
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [1047402, 518299]
+processed_samples 18500 unjoint_samples 18500 joint_samples 56 [943092, 1046744]
+processed_samples 18600 unjoint_samples 18600 joint_samples 55 [118948, 1046602]
+processed_samples 18600 unjoint_samples 18600 joint_samples 56 [280311, 1044722]
+processed_samples 18600 unjoint_samples 18600 joint_samples 57 [1026406, 248733]
+processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1047402, 815598]
+processed_samples 18600 unjoint_samples 18600 joint_samples 55 [980454, 1019416]
+processed_samples 18600 unjoint_samples 18600 joint_samples 57 [555359, 1047141]
+processed_samples 18600 unjoint_samples 18600 joint_samples 56 [298276, 1041104]
+processed_samples 18600 unjoint_samples 18600 joint_samples 55 [995693, 815666]
+processed_samples 18700 unjoint_samples 18700 joint_samples 57 [1026406, 551230]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [253149, 1047183]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [25383, 1041705]
+processed_samples 18700 unjoint_samples 18700 joint_samples 55 [386254, 1046602]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [735119, 1044722]
+processed_samples 18700 unjoint_samples 18700 joint_samples 57 [767829, 1047141]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [788303, 1041104]
+processed_samples 18700 unjoint_samples 18700 joint_samples 55 [1033694, 1034262]
+processed_samples 18800 unjoint_samples 18800 joint_samples 55 [792957, 1046602]
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1046793, 55820]
+processed_samples 18800 unjoint_samples 18800 joint_samples 56 [260160, 1041705]
+processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1038546, 315206]
+processed_samples 18800 unjoint_samples 18800 joint_samples 56 [472463, 1047183]
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1026406, 971244]
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1047210, 144209]
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1031035, 1047141]
+processed_samples 18900 unjoint_samples 18900 joint_samples 56 [95527, 1046602]
+processed_samples 18900 unjoint_samples 18900 joint_samples 57 [1046793, 412118]
+processed_samples 18900 unjoint_samples 18900 joint_samples 57 [1047210, 474342]
+processed_samples 18900 unjoint_samples 18900 joint_samples 56 [1038546, 728147]
+processed_samples 18900 unjoint_samples 18900 joint_samples 56 [569433, 1041705]
+processed_samples 18900 unjoint_samples 18900 joint_samples 58 [317017, 1047141]
+processed_samples 18900 unjoint_samples 18900 joint_samples 58 [329619, 1029045]
+processed_samples 18900 unjoint_samples 18900 joint_samples 56 [805862, 1047183]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [65066, 1047183]
+processed_samples 19000 unjoint_samples 19000 joint_samples 56 [886793, 1041705]
+processed_samples 19000 unjoint_samples 19000 joint_samples 56 [304472, 1046602]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1046793, 730143]
+processed_samples 19000 unjoint_samples 19000 joint_samples 58 [698928, 1029045]
+processed_samples 19000 unjoint_samples 19000 joint_samples 58 [603869, 1047141]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1047210, 906829]
+processed_samples 19000 unjoint_samples 19000 joint_samples 56 [1043659, 1043971]
+processed_samples 19100 unjoint_samples 19100 joint_samples 58 [920268, 1047141]
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [1048091, 24957]
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [409035, 1046382]
+processed_samples 19100 unjoint_samples 19100 joint_samples 58 [1047210, 309387]
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [364518, 1047183]
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [1046793, 1039709]
+processed_samples 19100 unjoint_samples 19100 joint_samples 58 [983695, 1029045]
+processed_samples 19100 unjoint_samples 19100 joint_samples 56 [675892, 1046602]
short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [737652, 1046382] +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1047210, 573972] +[h264 @ 0x5649f1924640] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 59 [1007487, 313847] +processed_samples 19200 unjoint_samples 19200 joint_samples 59 [167467, 1047141] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [737652, 1046382] +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1047210, 573972] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1048091, 334724] +[h264 @ 0x555a067f2680] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [261142, 1041956] +processed_samples 19200 unjoint_samples 19200 joint_samples 59 [1007487, 313847] +processed_samples 19200 unjoint_samples 19200 joint_samples 59 [167467, 1047141] 
+processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1048091, 334724] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [619786, 1047183] +[h264 @ 0x5649f2413a40] mmco: unref short failure +[h264 @ 0x5649f2413a40] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [261142, 1041956] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [619786, 1047183] +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 56 [908267, 1046602] +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 56 [908267, 1046602] +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5649eec85900] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5559ff406dc0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +processed_samples 19300 unjoint_samples 19300 joint_samples 58 [485454, 1041956] +processed_samples 19300 unjoint_samples 19300 joint_samples 57 [1000455, 204511] +processed_samples 19300 unjoint_samples 
19300 joint_samples 57 [1048091, 729811] +processed_samples 19300 unjoint_samples 19300 joint_samples 59 [1007487, 683177] +processed_samples 19300 unjoint_samples 19300 joint_samples 57 [897921, 1047183] +processed_samples 19300 unjoint_samples 19300 joint_samples 58 [485454, 1041956] +processed_samples 19300 unjoint_samples 19300 joint_samples 57 [1048091, 729811] +processed_samples 19300 unjoint_samples 19300 joint_samples 57 [1000455, 204511] +processed_samples 19300 unjoint_samples 19300 joint_samples 57 [897921, 1047183] +processed_samples 19300 unjoint_samples 19300 joint_samples 59 [1007487, 683177] +[h264 @ 0x5649f27ead40] mmco: unref short failure +[h264 @ 0x5649f27ead40] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1046284, 8585] +processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1046284, 8585] +processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1047210, 1009908] +processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1047210, 1009908] +processed_samples 19300 unjoint_samples 19300 joint_samples 59 [363026, 1047141] +processed_samples 19300 unjoint_samples 19300 joint_samples 59 [363026, 1047141] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a005e9840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f20b2140] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x5649f7001900] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x555a011557c0] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure 
+[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x5649f2612a00] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x555a01041600] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [816460, 1041956] +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [816460, 1041956] +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +processed_samples 19400 unjoint_samples 19400 joint_samples 59 [1047252, 252965] +processed_samples 19400 unjoint_samples 19400 joint_samples 59 [1047252, 252965] +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1048091, 71684] +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1048091, 71684] +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [130945, 1047183] +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [130945, 1047183] +processed_samples 19400 unjoint_samples 19400 joint_samples 57 [1000455, 463197] +processed_samples 19400 unjoint_samples 19400 joint_samples 60 [200308, 967315] +processed_samples 19400 unjoint_samples 19400 joint_samples 60 [200308, 967315] +processed_samples 19400 unjoint_samples 19400 joint_samples 57 [1000455, 463197] +[h264 @ 0x5649f2c7b340] mmco: unref short failure +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1046284, 348046] +[h264 @ 0x555a011b9980] mmco: unref short failure +processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1046284, 348046] +processed_samples 19400 unjoint_samples 19400 joint_samples 59 [762947, 1047141] +[h264 @ 0x5649ef45a780] mmco: unref short failure +processed_samples 19400 unjoint_samples 19400 joint_samples 59 [762947, 1047141] +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5559fd7d61c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x5559fdec2c80] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x555a007bc940] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure 
+[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a01e66140] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x5559fcde0d80] mmco: unref short failure +[h264 @ 0x555a008ecd80] mmco: unref short failure +[h264 @ 0x555a01468bc0] mmco: unref short failure +[h264 @ 0x555a01468bc0] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5649f1a022c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee5495c0] mmco: unref short failure +[h264 @ 0x5649ef2c5180] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +processed_samples 19500 unjoint_samples 19500 joint_samples 59 [98421, 1044129] +processed_samples 19500 unjoint_samples 19500 joint_samples 59 [98421, 1044129] +processed_samples 19500 unjoint_samples 19500 joint_samples 60 [520072, 967315] +processed_samples 19500 unjoint_samples 19500 joint_samples 58 [428724, 1047183] +processed_samples 19500 unjoint_samples 19500 joint_samples 58 [428724, 1047183] +processed_samples 19500 unjoint_samples 19500 joint_samples 58 [1048091, 498445] +processed_samples 19500 unjoint_samples 19500 joint_samples 60 [520072, 967315] +processed_samples 19500 unjoint_samples 19500 joint_samples 58 [1048091, 498445] +processed_samples 19500 unjoint_samples 19500 joint_samples 58 [1046284, 654391] +processed_samples 19500 unjoint_samples 19500 joint_samples 58 [1046284, 654391] +processed_samples 19500 unjoint_samples 19500 joint_samples 59 [1047252, 580686] +processed_samples 19500 unjoint_samples 19500 joint_samples 59 [1047252, 580686] +processed_samples 19500 unjoint_samples 19500 joint_samples 60 [1046315, 65258] +processed_samples 19500 unjoint_samples 19500 joint_samples 60 [1046315, 65258] +processed_samples 19500 unjoint_samples 19500 joint_samples 57 [1000455, 750201] +processed_samples 19500 unjoint_samples 19500 joint_samples 57 [1000455, 750201] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure 
+[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649ef022680] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5649f2f12340] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5559fcdf7c00] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f00553c0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [375679, 1044129] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [375679, 1044129] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [31722, 1040879] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [31722, 1040879] +processed_samples 19600 unjoint_samples 19600 joint_samples 60 [1046315, 403777] +processed_samples 19600 unjoint_samples 19600 joint_samples 60 [816049, 967315] +processed_samples 19600 unjoint_samples 19600 joint_samples 60 [816049, 967315] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [663528, 1047183] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [663528, 1047183] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [1048091, 839233] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [1048091, 839233] +processed_samples 19600 unjoint_samples 19600 joint_samples 60 [1046315, 403777] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1047252, 855252] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1047252, 855252] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [1046284, 989007] +processed_samples 19600 unjoint_samples 19600 joint_samples 58 [1046284, 989007] +[h264 @ 0x555a00eaad40] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure 
+[h264 @ 0x5559fcf80800] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5559ffcdde40] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x5649f1a0ff00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x5649ef9df380] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x5649efe68b80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5649efefb000] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff8cd940] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649ee8a2900] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x555a020a2340] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: 
unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +[h264 @ 0x5649ef6c5c40] mmco: unref short failure +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [84539, 1047531] +processed_samples 19700 unjoint_samples 19700 joint_samples 60 [112581, 1045897] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [946056, 1044129] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [954011, 205390] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [84539, 1047531] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [184479, 1038729] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [946056, 1044129] +processed_samples 19700 unjoint_samples 19700 joint_samples 60 [112581, 1045897] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [954011, 205390] +processed_samples 19700 unjoint_samples 19700 joint_samples 59 [184479, 1038729] +processed_samples 19700 unjoint_samples 19700 joint_samples 60 [1046315, 844762] +processed_samples 19700 unjoint_samples 19700 joint_samples 60 [1046315, 844762] +processed_samples 19700 unjoint_samples 19700 joint_samples 58 [562460, 1040879] +processed_samples 19700 unjoint_samples 19700 joint_samples 58 [562460, 1040879] +processed_samples 19700 unjoint_samples 19700 joint_samples 60 [1015354, 1016036] +processed_samples 19700 unjoint_samples 19700 joint_samples 60 [1015354, 1016036] +[h264 @ 0x555a006eb300] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649f25d8a00] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5649ee92bd80] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5559ff8e6480] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x555a0105a980] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649f4b1c1c0] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5559fddcae00] mmco: unref short failure +[h264 @ 0x5559fddcae00] mmco: unref short failure +[h264 @ 0x5559fd029880] mmco: unref short failure +[h264 @ 0x555a0030ad00] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649ee1bbe80] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fcf80800] mmco: 
unref short failure +[h264 @ 0x5649f01f3280] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5649ef232180] mmco: unref short failure +[h264 @ 0x5559fd938e80] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x555a0139de40] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a00a5e980] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x5649f2282280] mmco: unref short failure +[h264 @ 0x555a0098ff00] mmco: unref short failure +processed_samples 19800 unjoint_samples 19800 joint_samples 61 [232279, 1034490] +processed_samples 19800 unjoint_samples 19800 joint_samples 61 [265482, 1044262] +processed_samples 19800 unjoint_samples 19800 joint_samples 60 [1046343, 311230] +processed_samples 19800 unjoint_samples 19800 joint_samples 60 [473328, 1045897] +processed_samples 19800 unjoint_samples 19800 joint_samples 59 [462838, 1038729] +processed_samples 19800 unjoint_samples 19800 joint_samples 59 [542442, 1047531] +processed_samples 19800 unjoint_samples 19800 joint_samples 59 [954011, 559959] +processed_samples 19800 unjoint_samples 19800 joint_samples 58 [1026052, 1040879] +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +processed_samples 19800 unjoint_samples 19800 joint_samples 61 [232279, 1034490] +processed_samples 19800 unjoint_samples 19800 joint_samples 61 [265482, 1044262] +processed_samples 19800 unjoint_samples 19800 joint_samples 60 [1046343, 311230] +processed_samples 19800 unjoint_samples 19800 joint_samples 59 [542442, 1047531] +processed_samples 19800 unjoint_samples 19800 joint_samples 59 [462838, 1038729] +processed_samples 19800 unjoint_samples 19800 joint_samples 60 [473328, 1045897] +processed_samples 19800 unjoint_samples 19800 joint_samples 59 [954011, 559959] +processed_samples 19800 unjoint_samples 19800 joint_samples 
58 [1026052, 1040879] +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x5559ff8e7600] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x555a01a5f3c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5559fda59440] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649ef6b2840] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x555a01ca1040] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559fd17db00] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649ee763e80] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x5559fd15f840] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 
0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a009bd0c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [1046416, 204673] +[h264 @ 0x555a01e73fc0] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +[h264 @ 0x5649f0ff4540] mmco: unref short failure +processed_samples 19900 unjoint_samples 19900 joint_samples 60 [1046343, 707013] +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +[h264 @ 0x555a011b9980] mmco: unref short failure +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [851276, 1047531] +processed_samples 19900 unjoint_samples 19900 joint_samples 61 [601682, 1044262] +processed_samples 19900 unjoint_samples 19900 joint_samples 61 [525522, 1034490] +processed_samples 19900 unjoint_samples 19900 joint_samples 60 [795319, 1045897] +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [977170, 980548] +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [746959, 1038729] +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5559ffe3d1c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x555a01aa8680] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [1046416, 204673] +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +processed_samples 19900 unjoint_samples 19900 joint_samples 60 [1046343, 707013] +processed_samples 19900 unjoint_samples 19900 joint_samples 61 [601682, 1044262] +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [851276, 1047531] +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +[h264 @ 0x5649ef22d0c0] mmco: unref short failure +processed_samples 19900 unjoint_samples 19900 joint_samples 61 [525522, 1034490] +processed_samples 19900 unjoint_samples 19900 joint_samples 60 [795319, 1045897] +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [746959, 1038729] +processed_samples 19900 unjoint_samples 19900 joint_samples 59 [977170, 980548] +[h264 @ 0x5649f22b6800] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 
0x555a01ec2500] mmco: unref short failure +[h264 @ 0x5559ffe45c80] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5559ff10e700] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a036d6c40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x5649f1a0f340] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x555a005fce40] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5649f282bc80] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5559ff947380] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x5649ee5644c0] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x555a01c69580] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649f260c300] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a0234c380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649f2ce66c0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a01fadf40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f704ae40] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a01c61780] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x555a01bbaf80] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649f732ff00] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x5559fd82be00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef9af440] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short 
failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x5559fd252180] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +[h264 @ 0x5649efd953c0] mmco: unref short failure +processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1046343, 1010438] +processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1046343, 1010438] +processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1044665, 100033] +processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1044665, 100033] +processed_samples 20000 unjoint_samples 20000 joint_samples 60 [312813, 998374] +processed_samples 20000 unjoint_samples 20000 joint_samples 60 [312813, 998374] +processed_samples 20000 unjoint_samples 20000 joint_samples 59 [1046416, 608727] +processed_samples 20000 unjoint_samples 20000 joint_samples 59 [1046416, 608727] +processed_samples 20000 unjoint_samples 20000 joint_samples 61 [914952, 1044262] +processed_samples 20000 unjoint_samples 20000 joint_samples 61 [914952, 1044262] +processed_samples 20000 unjoint_samples 20000 joint_samples 59 [1012305, 1038729] +processed_samples 20000 unjoint_samples 20000 joint_samples 59 [1012305, 1038729] +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +processed_samples 20000 unjoint_samples 20000 joint_samples 61 [1047462, 61045] +processed_samples 20000 unjoint_samples 20000 joint_samples 61 [1047462, 61045] +processed_samples 20000 unjoint_samples 20000 joint_samples 61 [812346, 1034490] +processed_samples 20000 unjoint_samples 20000 joint_samples 61 [812346, 1034490] +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a038cbec0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a00d0ae40] mmco: unref short failure +[h264 @ 0x5649f4b2ce40] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5559ff7cf1c0] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref 
short failure +[h264 @ 0x5649eed00280] mmco: unref short failure +[h264 @ 0x555a00a91800] mmco: unref short failure +[h264 @ 0x5649ef41eac0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x5559fd937d40] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x555a01797280] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x5649ee905540] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649f23cfd80] mmco: unref short failure +[h264 @ 0x555a01f0ed40] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649ef12bfc0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x555a009f4440] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x555a00ae4ec0] mmco: unref short failure +[h264 @ 0x5649ee1a21c0] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649eee5fd00] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5559ff97fb80] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649ef55cb40] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x5559fd06cec0] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x5649efbbf540] mmco: unref short failure +[h264 @ 0x555a01d47dc0] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +[h264 @ 0x555a01ec2500] mmco: unref short failure +processed_samples 20100 unjoint_samples 20100 joint_samples 61 [1047462, 266841] +processed_samples 20100 unjoint_samples 20100 joint_samples 61 [400497, 1033549] +processed_samples 20100 unjoint_samples 20100 joint_samples 
60 [668134, 998374] +processed_samples 20100 unjoint_samples 20100 joint_samples 62 [65666, 1040241] +processed_samples 20100 unjoint_samples 20100 joint_samples 62 [191447, 1044311] +processed_samples 20100 unjoint_samples 20100 joint_samples 60 [275657, 1044774] +processed_samples 20100 unjoint_samples 20100 joint_samples 60 [1044665, 487196] +processed_samples 20100 unjoint_samples 20100 joint_samples 61 [1047462, 266841] +processed_samples 20100 unjoint_samples 20100 joint_samples 61 [400497, 1033549] +processed_samples 20100 unjoint_samples 20100 joint_samples 60 [668134, 998374] +processed_samples 20100 unjoint_samples 20100 joint_samples 62 [191447, 1044311] +processed_samples 20100 unjoint_samples 20100 joint_samples 62 [65666, 1040241] +processed_samples 20100 unjoint_samples 20100 joint_samples 60 [275657, 1044774] +processed_samples 20100 unjoint_samples 20100 joint_samples 60 [1044665, 487196] +processed_samples 20100 unjoint_samples 20100 joint_samples 59 [1046416, 1024246] +processed_samples 20100 unjoint_samples 20100 joint_samples 59 [1046416, 1024246] +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649f2397240] mmco: unref short failure +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +[h264 @ 0x5559fd0e6300] mmco: unref short failure +[h264 @ 0x5649ef6eac00] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x555a05e19240] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x5649ee46c840] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5649f279e4c0] mmco: unref short failure +[h264 @ 0x5559ff89cdc0] mmco: unref short failure +[h264 @ 0x5649f2cd1440] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x5649eeb19ec0] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x555a02007740] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x5649efe78840] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x555a0080f5c0] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x5559fff57580] mmco: unref short failure +[h264 @ 0x5649ee7de5c0] mmco: unref short failure +[h264 @ 0x5649ee7de5c0] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f2ad39c0] mmco: unref short failure +[h264 @ 0x555a0284a200] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x5649f03086c0] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x555a02069380] mmco: unref short failure +[h264 @ 0x5649f2850a40] mmco: unref short failure +[h264 @ 0x555a01077180] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x5649f1924640] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x555a06786a80] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x5649ef013980] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x5649efe67ec0] mmco: unref short failure +[h264 @ 
0x5649efe67ec0] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x555a05fa7000] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +[h264 @ 0x5649f22870c0] mmco: unref short failure +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [344919, 1038046] +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [531865, 1044311] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [645939, 1033549] +[h264 @ 0x555a05ca29c0] mmco: unref short failure +[h264 @ 0x555a05ca29c0] mmco: unref short failure +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [1044665, 952866] +[h264 @ 0x5649f22870c0] mmco: unref short failure +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [413173, 1040241] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [344919, 1038046] +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [531865, 1044311] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [584681, 1044774] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [1047462, 697513] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [645939, 1033549] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [1044665, 952866] +[h264 @ 0x5559fe7e5a80] mmco: unref short failure +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [413173, 1040241] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [945907, 998374] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [584681, 1044774] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [1047462, 697513] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [945907, 998374] +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x5649f2472700] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649eee5d2c0] mmco: unref short failure +[h264 @ 0x5559ffdc1480] mmco: unref short failure +[h264 @ 0x5649eebcae00] mmco: unref short failure +[h264 @ 0x5559fda58ac0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x5559fdcce100] mmco: unref short failure +[h264 @ 0x555a02849d00] mmco: unref short failure +[h264 @ 0x5649ee1e0200] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee505800] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x555a067f2680] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x5649f391fe80] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x555a045943c0] mmco: unref short failure +[h264 @ 0x5649eea7ce40] mmco: unref short failure +[h264 @ 0x555a0018dd80] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x555a00252580] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 0x5649ee4f7040] mmco: unref short failure +[h264 @ 
+processed_samples 20300 unjoint_samples 20300 joint_samples 60 [618860, 1038046]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [192732, 1046867]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [171092, 1046713]
+processed_samples 20300 unjoint_samples 20300 joint_samples 62 [836409, 1044311]
+processed_samples 20300 unjoint_samples 20300 joint_samples 62 [716439, 1040241]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1047462, 963152]
+processed_samples 20300 unjoint_samples 20300 joint_samples 60 [867649, 1044774]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [945851, 1033549]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [603384, 1046713]
+processed_samples 20400 unjoint_samples 20400 joint_samples 62 [1046106, 124157]
+processed_samples 20400 unjoint_samples 20400 joint_samples 63 [73355, 1045348]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1045911, 122833]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [609605, 1046867]
+processed_samples 20400 unjoint_samples 20400 joint_samples 60 [989778, 1038046]
+processed_samples 20400 unjoint_samples 20400 joint_samples 62 [1047462, 335378]
+processed_samples 20400 unjoint_samples 20400 joint_samples 62 [974322, 1040241]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1040126, 271248]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1046106, 426680]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [913941, 1046713]
+processed_samples 20500 unjoint_samples 20500 joint_samples 63 [362330, 1045348]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1045911, 338188]
+processed_samples 20500 unjoint_samples 20500 joint_samples 63 [1047251, 198669]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1021682, 1046867]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1047462, 561982]
+processed_samples 20600 unjoint_samples 20600 joint_samples 61 [1040126, 588687]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [312567, 1046867]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [177108, 1046713]
+processed_samples 20600 unjoint_samples 20600 joint_samples 63 [1047251, 636474]
+processed_samples 20600 unjoint_samples 20600 joint_samples 63 [641892, 1045348]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1047462, 923886]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1046106, 784127]
+processed_samples 20600 unjoint_samples 20600 joint_samples 61 [1045911, 666328]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [652277, 1046867]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [1046106, 76576]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [531223, 1046713]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [890598, 1045348]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [254512, 1032742]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [1047251, 935193]
+processed_samples 20700 unjoint_samples 20700 joint_samples 61 [1040126, 867371]
+processed_samples 20700 unjoint_samples 20700 joint_samples 61 [1045911, 992219]
+[2024-12-01 22:44:40,142] torch.distributed.elastic.agent.server.api: [ERROR] Error waiting on exit barrier. Elapsed: 300.05509781837463 seconds
++ set +x